
[1/7] cifs: smbd: Make upper layer decide when to destroy the transport

Message ID 20180507222006.20781-1-longli@linuxonhyperv.com
State New
Series [1/7] cifs: smbd: Make upper layer decide when to destroy the transport

Commit Message

Long Li May 7, 2018, 10:20 p.m. UTC
From: Long Li <longli@microsoft.com>

On transport reconnect, the upper layer CIFS code destroys the current
transport and then reconnects. This code path is not used by SMBD:
SMBD destroys its transport on RDMA disconnect notification,
independent of the CIFS upper layer behavior.

This approach adds cost to the SMBD layer to handle transport shutdown
and restart, and to deal with several race conditions on reconnecting
the transport.

Rework this code path by introducing a new smbd_destroy. This function
is called from the upper layer to ask SMBD to destroy the transport.
SMBD no longer needs to destroy the transport by itself while worrying
about whether a data transfer is in progress. The upper layer
guarantees the transport is locked.

Signed-off-by: Long Li <longli@microsoft.com>
---
 fs/cifs/connect.c   |   9 ++---
 fs/cifs/smbdirect.c | 114 +++++++++++++++++++++++++++++++++++++++++++---------
 fs/cifs/smbdirect.h |   3 +-
 3 files changed, 100 insertions(+), 26 deletions(-)
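
To see the intended division of responsibility, here is a minimal user-space
C sketch of the ownership model the series moves to (toy types and function
names invented for illustration; this is not the kernel code): the upper
layer holds the server mutex and decides when the transport is destroyed,
instead of the transport tearing itself down on a disconnect event.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_transport {
	bool connected;                 /* placeholder transport state */
};

struct toy_server {
	pthread_mutex_t srv_mutex;      /* models server->srv_mutex */
	struct toy_transport *conn;     /* models server->smbd_conn */
};

/* Models the new smbd_destroy(): called by the upper layer, which already
 * holds srv_mutex, so no send/receive path can race with the teardown. */
static void toy_transport_destroy(struct toy_server *server)
{
	if (!server->conn)
		return;                 /* already destroyed, nothing to do */
	/* the real code drains pending sends and the reassembly queue here */
	free(server->conn);
	server->conn = NULL;
}

/* Models the reconnect path in the upper layer (cifs_reconnect-like). */
static void toy_reconnect(struct toy_server *server)
{
	pthread_mutex_lock(&server->srv_mutex);
	toy_transport_destroy(server);  /* the upper layer decides when */
	server->conn = calloc(1, sizeof(*server->conn));
	pthread_mutex_unlock(&server->srv_mutex);
}

int main(void)
{
	struct toy_server srv = { .srv_mutex = PTHREAD_MUTEX_INITIALIZER };

	srv.conn = calloc(1, sizeof(*srv.conn));
	toy_reconnect(&srv);
	printf("reconnected, conn=%p\n", (void *)srv.conn);
	toy_transport_destroy(&srv);
	return 0;
}

In the patch below the same role is played by smbd_destroy(server), called
from cifs_reconnect() under srv_mutex and from clean_demultiplex_info()
during session teardown.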

Comments

kernel test robot May 8, 2018, 3:45 a.m. UTC | #1
Hi Long,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on cifs/for-next]
[also build test WARNING on v4.17-rc4 next-20180507]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Long-Li/cifs-smbd-Make-upper-layer-decide-when-to-destroy-the-transport/20180508-110150
base:   git://git.samba.org/sfrench/cifs-2.6.git for-next
config: i386-randconfig-a1-05080831 (attached as .config)
compiler: gcc-4.9 (Debian 4.9.4-2) 4.9.4
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All warnings (new ones prefixed by >>):

   fs//cifs/connect.c: In function 'cifs_reconnect':
>> fs//cifs/connect.c:381:16: warning: passing argument 1 of 'smbd_destroy' from incompatible pointer type
      smbd_destroy(server);
                   ^
   In file included from fs//cifs/connect.c:58:0:
   fs//cifs/smbdirect.h:334:20: note: expected 'struct smbd_connection *' but argument is of type 'struct TCP_Server_Info *'
    static inline void smbd_destroy(struct smbd_connection *info) {}
                       ^
   fs//cifs/connect.c: In function 'clean_demultiplex_info':
   fs//cifs/connect.c:715:16: warning: passing argument 1 of 'smbd_destroy' from incompatible pointer type
      smbd_destroy(server);
                   ^
   In file included from fs//cifs/connect.c:58:0:
   fs//cifs/smbdirect.h:334:20: note: expected 'struct smbd_connection *' but argument is of type 'struct TCP_Server_Info *'
    static inline void smbd_destroy(struct smbd_connection *info) {}
                       ^

vim +/smbd_destroy +381 fs//cifs/connect.c

   312	
   313	static int ip_connect(struct TCP_Server_Info *server);
   314	static int generic_ip_connect(struct TCP_Server_Info *server);
   315	static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
   316	static void cifs_prune_tlinks(struct work_struct *work);
   317	static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
   318						const char *devname);
   319	
   320	/*
   321	 * cifs tcp session reconnection
   322	 *
   323	 * mark tcp session as reconnecting so temporarily locked
   324	 * mark all smb sessions as reconnecting for tcp session
   325	 * reconnect tcp session
   326	 * wake up waiters on reconnection? - (not needed currently)
   327	 */
   328	int
   329	cifs_reconnect(struct TCP_Server_Info *server)
   330	{
   331		int rc = 0;
   332		struct list_head *tmp, *tmp2;
   333		struct cifs_ses *ses;
   334		struct cifs_tcon *tcon;
   335		struct mid_q_entry *mid_entry;
   336		struct list_head retry_list;
   337	
   338		spin_lock(&GlobalMid_Lock);
   339		if (server->tcpStatus == CifsExiting) {
   340			/* the demux thread will exit normally
   341			next time through the loop */
   342			spin_unlock(&GlobalMid_Lock);
   343			return rc;
   344		} else
   345			server->tcpStatus = CifsNeedReconnect;
   346		spin_unlock(&GlobalMid_Lock);
   347		server->maxBuf = 0;
   348		server->max_read = 0;
   349	
   350		cifs_dbg(FYI, "Reconnecting tcp session\n");
   351	
   352		/* before reconnecting the tcp session, mark the smb session (uid)
   353			and the tid bad so they are not used until reconnected */
   354		cifs_dbg(FYI, "%s: marking sessions and tcons for reconnect\n",
   355			 __func__);
   356		spin_lock(&cifs_tcp_ses_lock);
   357		list_for_each(tmp, &server->smb_ses_list) {
   358			ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
   359			ses->need_reconnect = true;
   360			list_for_each(tmp2, &ses->tcon_list) {
   361				tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
   362				tcon->need_reconnect = true;
   363			}
   364			if (ses->tcon_ipc)
   365				ses->tcon_ipc->need_reconnect = true;
   366		}
   367		spin_unlock(&cifs_tcp_ses_lock);
   368	
   369		/* do not want to be sending data on a socket we are freeing */
   370		cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
   371		mutex_lock(&server->srv_mutex);
   372		if (server->ssocket) {
   373			cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n",
   374				 server->ssocket->state, server->ssocket->flags);
   375			kernel_sock_shutdown(server->ssocket, SHUT_WR);
   376			cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n",
   377				 server->ssocket->state, server->ssocket->flags);
   378			sock_release(server->ssocket);
   379			server->ssocket = NULL;
   380		} else if (cifs_rdma_enabled(server))
 > 381			smbd_destroy(server);
   382		server->sequence_number = 0;
   383		server->session_estab = false;
   384		kfree(server->session_key.response);
   385		server->session_key.response = NULL;
   386		server->session_key.len = 0;
   387		server->lstrp = jiffies;
   388	
   389		/* mark submitted MIDs for retry and issue callback */
   390		INIT_LIST_HEAD(&retry_list);
   391		cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
   392		spin_lock(&GlobalMid_Lock);
   393		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
   394			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
   395			if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
   396				mid_entry->mid_state = MID_RETRY_NEEDED;
   397			list_move(&mid_entry->qhead, &retry_list);
   398		}
   399		spin_unlock(&GlobalMid_Lock);
   400		mutex_unlock(&server->srv_mutex);
   401	
   402		cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
   403		list_for_each_safe(tmp, tmp2, &retry_list) {
   404			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
   405			list_del_init(&mid_entry->qhead);
   406			mid_entry->callback(mid_entry);
   407		}
   408	
   409		do {
   410			try_to_freeze();
   411	
   412			/* we should try only the port we connected to before */
   413			mutex_lock(&server->srv_mutex);
   414			if (cifs_rdma_enabled(server))
   415				rc = smbd_reconnect(server);
   416			else
   417				rc = generic_ip_connect(server);
   418			if (rc) {
   419				cifs_dbg(FYI, "reconnect error %d\n", rc);
   420				mutex_unlock(&server->srv_mutex);
   421				msleep(3000);
   422			} else {
   423				atomic_inc(&tcpSesReconnectCount);
   424				spin_lock(&GlobalMid_Lock);
   425				if (server->tcpStatus != CifsExiting)
   426					server->tcpStatus = CifsNeedNegotiate;
   427				spin_unlock(&GlobalMid_Lock);
   428				mutex_unlock(&server->srv_mutex);
   429			}
   430		} while (server->tcpStatus == CifsNeedReconnect);
   431	
   432		if (server->tcpStatus == CifsNeedNegotiate)
   433			mod_delayed_work(cifsiod_wq, &server->echo, 0);
   434	
   435		return rc;
   436	}
   437	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
kernel test robot May 8, 2018, 5:24 a.m. UTC | #2
Hi Long,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on cifs/for-next]
[also build test ERROR on v4.17-rc4 next-20180507]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Long-Li/cifs-smbd-Make-upper-layer-decide-when-to-destroy-the-transport/20180508-110150
base:   git://git.samba.org/sfrench/cifs-2.6.git for-next
config: i386-randconfig-x013-201818 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-16) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   fs//cifs/connect.c: In function 'cifs_reconnect':
>> fs//cifs/connect.c:381:16: error: passing argument 1 of 'smbd_destroy' from incompatible pointer type [-Werror=incompatible-pointer-types]
      smbd_destroy(server);
                   ^~~~~~
   In file included from fs//cifs/connect.c:58:0:
   fs//cifs/smbdirect.h:334:20: note: expected 'struct smbd_connection *' but argument is of type 'struct TCP_Server_Info *'
    static inline void smbd_destroy(struct smbd_connection *info) {}
                       ^~~~~~~~~~~~
   fs//cifs/connect.c: In function 'clean_demultiplex_info':
   fs//cifs/connect.c:715:16: error: passing argument 1 of 'smbd_destroy' from incompatible pointer type [-Werror=incompatible-pointer-types]
      smbd_destroy(server);
                   ^~~~~~
   In file included from fs//cifs/connect.c:58:0:
   fs//cifs/smbdirect.h:334:20: note: expected 'struct smbd_connection *' but argument is of type 'struct TCP_Server_Info *'
    static inline void smbd_destroy(struct smbd_connection *info) {}
                       ^~~~~~~~~~~~

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
Long Li May 8, 2018, 7:29 p.m. UTC | #3
> Subject: Re: [PATCH 1/7] cifs: smbd: Make upper layer decide when to
> destroy the transport
> 
> Hi Long,
> 
> Thank you for the patch! Yet something to improve:
> 
> [auto build test ERROR on cifs/for-next] [also build test ERROR on v4.17-rc4
> next-20180507] [if your patch is applied to the wrong git tree, please drop us
> a note to help improve the system]
> 
> url:    https://github.com/0day-ci/linux/commits/Long-Li/cifs-smbd-Make-upper-layer-decide-when-to-destroy-the-transport/20180508-110150
> base:   git://git.samba.org/sfrench/cifs-2.6.git for-next
> config: i386-randconfig-x013-201818 (attached as .config)
> compiler: gcc-7 (Debian 7.3.0-16) 7.3.0
> reproduce:
>         # save the attached .config to linux build tree
>         make ARCH=i386
> 
> All errors (new ones prefixed by >>):
> 
>    fs//cifs/connect.c: In function 'cifs_reconnect':
> >> fs//cifs/connect.c:381:16: error: passing argument 1 of
> >> 'smbd_destroy' from incompatible pointer type
> >> [-Werror=incompatible-pointer-types]
>       smbd_destroy(server);
>                    ^~~~~~

Thanks! I will send v2.
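
For reference, the error points at the dummy smbd_destroy() used for builds
without CONFIG_CIFS_SMB_DIRECT (fs/cifs/smbdirect.h:334 above), which this
version of the patch leaves with the old argument type. A likely shape of
the v2 fix (an assumption here, not quoted from this thread) is to switch
that stub to the new prototype:

/* fs/cifs/smbdirect.h stub for builds without CONFIG_CIFS_SMB_DIRECT.
 * The old stub took a struct smbd_connection *; taking the server
 * pointer instead matches the new prototype, so the calls in
 * cifs_reconnect() and clean_demultiplex_info() compile either way. */
static inline void smbd_destroy(struct TCP_Server_Info *server) {}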

--
To unsubscribe from this list: send the line "unsubscribe linux-cifs" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Patch

diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 83b0234..5db3e9d 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -377,7 +377,8 @@  cifs_reconnect(struct TCP_Server_Info *server)
 			 server->ssocket->state, server->ssocket->flags);
 		sock_release(server->ssocket);
 		server->ssocket = NULL;
-	}
+	} else if (cifs_rdma_enabled(server))
+		smbd_destroy(server);
 	server->sequence_number = 0;
 	server->session_estab = false;
 	kfree(server->session_key.response);
@@ -710,10 +711,8 @@  static void clean_demultiplex_info(struct TCP_Server_Info *server)
 	wake_up_all(&server->request_q);
 	/* give those requests time to exit */
 	msleep(125);
-	if (cifs_rdma_enabled(server) && server->smbd_conn) {
-		smbd_destroy(server->smbd_conn);
-		server->smbd_conn = NULL;
-	}
+	if (cifs_rdma_enabled(server))
+		smbd_destroy(server);
 	if (server->ssocket) {
 		sock_release(server->ssocket);
 		server->ssocket = NULL;
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index c62f7c9..1aa2d35 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -318,6 +318,9 @@  static int smbd_conn_upcall(
 
 		info->transport_status = SMBD_DISCONNECTED;
 		smbd_process_disconnected(info);
+		wake_up(&info->disconn_wait);
+		wake_up_interruptible(&info->wait_reassembly_queue);
+		wake_up_interruptible_all(&info->wait_send_queue);
 		break;
 
 	default:
@@ -1476,17 +1479,97 @@  static void idle_connection_timer(struct work_struct *work)
 			info->keep_alive_interval*HZ);
 }
 
-/* Destroy this SMBD connection, called from upper layer */
-void smbd_destroy(struct smbd_connection *info)
+/*
+ * Destroy the transport and related RDMA and memory resources
+ * Need to go through all the pending counters and make sure no one is using
+ * the transport while it is destroyed
+ */
+void smbd_destroy(struct TCP_Server_Info *server)
 {
+	struct smbd_connection *info = server->smbd_conn;
+	struct smbd_response *response;
+	unsigned long flags;
+
+	if (!info) {
+		log_rdma_event(INFO, "rdma session already destroyed\n");
+		return;
+	}
+
 	log_rdma_event(INFO, "destroying rdma session\n");
+	if (info->transport_status != SMBD_DISCONNECTED) {
+		rdma_disconnect(server->smbd_conn->id);
+		log_rdma_event(INFO, "wait for transport being disconnected\n");
+		wait_event(
+			info->disconn_wait,
+			info->transport_status == SMBD_DISCONNECTED);
+	}
 
-	/* Kick off the disconnection process */
-	smbd_disconnect_rdma_connection(info);
+	log_rdma_event(INFO, "destroying qp\n");
+	ib_drain_qp(info->id->qp);
+	rdma_destroy_qp(info->id);
+
+	log_rdma_event(INFO, "cancelling idle timer\n");
+	cancel_delayed_work_sync(&info->idle_timer_work);
+	log_rdma_event(INFO, "cancelling send immediate work\n");
+	cancel_delayed_work_sync(&info->send_immediate_work);
 
-	log_rdma_event(INFO, "wait for transport being destroyed\n");
-	wait_event(info->wait_destroy,
-		info->transport_status == SMBD_DESTROYED);
+	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
+	wait_event(info->wait_send_pending,
+		atomic_read(&info->send_pending) == 0);
+	wait_event(info->wait_send_payload_pending,
+		atomic_read(&info->send_payload_pending) == 0);
+
+	/* It's not possible for upper layer to get to reassembly */
+	log_rdma_event(INFO, "drain the reassembly queue\n");
+	do {
+		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
+		response = _get_first_reassembly(info);
+		if (response) {
+			list_del(&response->list);
+			spin_unlock_irqrestore(
+				&info->reassembly_queue_lock, flags);
+			put_receive_buffer(info, response);
+		} else
+			spin_unlock_irqrestore(
+				&info->reassembly_queue_lock, flags);
+	} while (response);
+	info->reassembly_data_length = 0;
+
+	log_rdma_event(INFO, "free receive buffers\n");
+	wait_event(info->wait_receive_queues,
+		info->count_receive_queue + info->count_empty_packet_queue
+			== info->receive_credit_max);
+	destroy_receive_buffers(info);
+
+	/*
+	 * For performance reasons, memory registration and deregistration
+	 * are not locked by srv_mutex. It is possible some processes are
+	 * blocked on transport srv_mutex while holding memory registration.
+	 * Release the transport srv_mutex to allow them to hit the failure
+ * path when sending data, and then release memory registrations.
+	 */
+	log_rdma_event(INFO, "freeing mr list\n");
+	wake_up_interruptible_all(&info->wait_mr);
+	while (atomic_read(&info->mr_used_count)) {
+		mutex_unlock(&server->srv_mutex);
+		msleep(1000);
+		mutex_lock(&server->srv_mutex);
+	}
+	destroy_mr_list(info);
+
+	ib_free_cq(info->send_cq);
+	ib_free_cq(info->recv_cq);
+	ib_dealloc_pd(info->pd);
+	rdma_destroy_id(info->id);
+
+	/* free mempools */
+	mempool_destroy(info->request_mempool);
+	kmem_cache_destroy(info->request_cache);
+
+	mempool_destroy(info->response_mempool);
+	kmem_cache_destroy(info->response_cache);
+
+	info->transport_status = SMBD_DESTROYED;
 
 	destroy_workqueue(info->workqueue);
 	kfree(info);
@@ -1511,17 +1594,9 @@  int smbd_reconnect(struct TCP_Server_Info *server)
 	 */
 	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
 		log_rdma_event(INFO, "disconnecting transport\n");
-		smbd_disconnect_rdma_connection(server->smbd_conn);
+		smbd_destroy(server);
 	}
 
-	/* wait until the transport is destroyed */
-	if (!wait_event_timeout(server->smbd_conn->wait_destroy,
-		server->smbd_conn->transport_status == SMBD_DESTROYED, 5*HZ))
-		return -EAGAIN;
-
-	destroy_workqueue(server->smbd_conn->workqueue);
-	kfree(server->smbd_conn);
-
 create_conn:
 	log_rdma_event(INFO, "creating rdma session\n");
 	server->smbd_conn = smbd_get_connection(
@@ -1730,12 +1805,13 @@  static struct smbd_connection *_smbd_get_connection(
 	conn_param.retry_count = SMBD_CM_RETRY;
 	conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
 	conn_param.flow_control = 0;
-	init_waitqueue_head(&info->wait_destroy);
 
 	log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
 		&addr_in->sin_addr, port);
 
 	init_waitqueue_head(&info->conn_wait);
+	init_waitqueue_head(&info->disconn_wait);
+	init_waitqueue_head(&info->wait_reassembly_queue);
 	rc = rdma_connect(info->id, &conn_param);
 	if (rc) {
 		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
@@ -1759,8 +1835,6 @@  static struct smbd_connection *_smbd_get_connection(
 	}
 
 	init_waitqueue_head(&info->wait_send_queue);
-	init_waitqueue_head(&info->wait_reassembly_queue);
-
 	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
 	INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
 	queue_delayed_work(info->workqueue, &info->idle_timer_work,
@@ -1801,7 +1875,7 @@  static struct smbd_connection *_smbd_get_connection(
 
 allocate_mr_failed:
 	/* At this point, need to a full transport shutdown */
-	smbd_destroy(info);
+	smbd_destroy(server);
 	return NULL;
 
 negotiation_failed:
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
index f9038da..7849989 100644
--- a/fs/cifs/smbdirect.h
+++ b/fs/cifs/smbdirect.h
@@ -71,6 +71,7 @@  struct smbd_connection {
 	struct completion ri_done;
 	wait_queue_head_t conn_wait;
 	wait_queue_head_t wait_destroy;
+	wait_queue_head_t disconn_wait;
 
 	struct completion negotiate_completion;
 	bool negotiate_done;
@@ -288,7 +289,7 @@  struct smbd_connection *smbd_get_connection(
 /* Reconnect SMBDirect session */
 int smbd_reconnect(struct TCP_Server_Info *server);
 /* Destroy SMBDirect session */
-void smbd_destroy(struct smbd_connection *info);
+void smbd_destroy(struct TCP_Server_Info *server);
 
 /* Interface for carrying upper layer I/O through send/recv */
 int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
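
A note on one design choice in the smbdirect.c hunk above: while waiting for
mr_used_count to reach zero, smbd_destroy() repeatedly releases and
re-acquires srv_mutex, because a sender that still holds a memory
registration may itself be blocked on srv_mutex and needs the lock to hit
its failure path and release the registration. A rough user-space model of
that unlock-wait-relock loop (toy names, C11 atomics standing in for the
kernel's atomic_t; not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

static pthread_mutex_t srv_mutex = PTHREAD_MUTEX_INITIALIZER; /* models srv_mutex */
static atomic_int mr_used_count = 1;              /* models info->mr_used_count */

/* A sender that holds a memory registration and needs srv_mutex to fail
 * its I/O before it can drop the registration. */
static void *blocked_sender(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&srv_mutex);
	atomic_fetch_sub(&mr_used_count, 1);      /* registration released */
	pthread_mutex_unlock(&srv_mutex);
	return NULL;
}

/* Called with srv_mutex held, as smbd_destroy() is. Keep dropping the lock
 * so blocked senders can run and release their registrations; only then is
 * it safe to tear down the MR list. */
static void wait_for_mr_release(void)
{
	while (atomic_load(&mr_used_count)) {
		pthread_mutex_unlock(&srv_mutex);
		sleep(1);                         /* stands in for msleep(1000) */
		pthread_mutex_lock(&srv_mutex);
	}
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&srv_mutex);           /* upper layer holds the lock */
	pthread_create(&t, NULL, blocked_sender, NULL);
	wait_for_mr_release();                    /* would deadlock without the unlock */
	pthread_mutex_unlock(&srv_mutex);
	pthread_join(t, NULL);
	return 0;
}

Without the unlock inside the loop the sketch never terminates, which is the
deadlock the comment in the patch is guarding against.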