[v2] Squash-to "mptcp: refactor shutdown and close"

Message ID f4551b5fce944cf69dbbecbacf5ac354999b76fe.1604697929.git.pabeni@redhat.com
State Accepted, archived
Commit a9dd786ca6339739998897ed5def23b81284648c
Delegated to: Matthieu Baerts
Series [v2] Squash-to "mptcp: refactor shutdown and close"

Commit Message

Paolo Abeni Nov. 6, 2020, 9:26 p.m. UTC
The msk status for fallback sockets is not tracked correctly.
We must switch the msk to TCP_CLOSE only after the subflow has
moved there, or the worker will not run and pending mptcp data
will not be spooled after close().

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/mptcp/protocol.c | 13 +++++++++----
 net/mptcp/subflow.c  |  1 -
 2 files changed, 9 insertions(+), 5 deletions(-)
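
As context for the change, the close-path decision after this v2 can be modelled roughly as follows. This is a minimal standalone C sketch, not kernel code: the states and the empty-send-queue test only mirror what the hunks below check via sk->sk_state and mptcp_send_head(), and can_close_now() is a hypothetical helper introduced here purely for illustration.

/* Hypothetical userspace model of the close-path decision: for a
 * fallback socket the msk may move to TCP_CLOSE immediately only when
 * nothing is left to send; otherwise the transition is deferred so the
 * worker can still run and spool the pending data.
 */
#include <stdbool.h>
#include <stdio.h>

enum model_state { M_ESTABLISHED, M_LISTEN, M_CLOSE };

static bool can_close_now(enum model_state st, bool fallback,
			  bool send_queue_empty)
{
	if (st == M_LISTEN || st == M_CLOSE)
		return true;		/* nothing pending by definition */
	if (fallback)
		return send_queue_empty; /* defer until data is spooled */
	return false;			/* regular msk: the worker decides */
}

int main(void)
{
	printf("fallback, pending data -> close now? %d\n",
	       can_close_now(M_ESTABLISHED, true, false));
	printf("fallback, queue empty  -> close now? %d\n",
	       can_close_now(M_ESTABLISHED, true, true));
	return 0;
}

The key point is the fallback branch: with data still queued the msk keeps its current state, so the worker can run, spool the data to the subflow, and only then move the socket to TCP_CLOSE.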

Comments

Mat Martineau Nov. 6, 2020, 11:30 p.m. UTC | #1
On Fri, 6 Nov 2020, Paolo Abeni wrote:

> The msk status for fallback sockets is not tracked correctly.
> We must switch the msk to TCP_CLOSE only after the subflow has
> moved there, or the worker will not run and pending mptcp data
> will not be spooled after close().
>
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> ---
> net/mptcp/protocol.c | 13 +++++++++----
> net/mptcp/subflow.c  |  1 -
> 2 files changed, 9 insertions(+), 5 deletions(-)
>
> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index a6bd06c724d5..4abec5f74f1c 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -764,8 +764,10 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
>
> 	mptcp_for_each_subflow(msk, subflow)
> 		receivers += !subflow->rx_eof;
> +	if (receivers)
> +		return;
>
> -	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
> +	if (!(sk->sk_shutdown & RCV_SHUTDOWN)) {
> 		/* hopefully temporary hack: propagate shutdown status
> 		 * to msk, when all subflows agree on it
> 		 */
> @@ -775,6 +777,8 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
> 		set_bit(MPTCP_DATA_READY, &msk->flags);
> 		sk->sk_data_ready(sk);
> 	}
> +	if (sk->sk_state != TCP_CLOSE && sk->sk_shutdown == SHUTDOWN_MASK)
> +		inet_sk_state_store(sk, TCP_CLOSE);
> }
>
> static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
> @@ -2157,11 +2161,12 @@ static void mptcp_close(struct sock *sk, long timeout)
> 	lock_sock(sk);
> 	sk->sk_shutdown = SHUTDOWN_MASK;
>
> -	if (sk->sk_state == TCP_LISTEN ||
> -	    __mptcp_check_fallback((struct mptcp_sock *)sk)) {
> +	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
> 		inet_sk_state_store(sk, TCP_CLOSE);
> 		goto cleanup;
> -	} else if (sk->sk_state == TCP_CLOSE) {
> +	} else if (__mptcp_check_fallback((struct mptcp_sock *)sk)) {
> +		if (!mptcp_send_head(sk))
> +			inet_sk_state_store(sk, TCP_CLOSE);
> 		goto cleanup;
> 	}
>
> diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
> index 1e9a72af67dc..fd356dc9d580 100644
> --- a/net/mptcp/subflow.c
> +++ b/net/mptcp/subflow.c
> @@ -1282,7 +1282,6 @@ static void subflow_state_change(struct sock *sk)
> 		mptcp_data_ready(parent, sk);
>
> 	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
> -	    !(parent->sk_shutdown & RCV_SHUTDOWN) &&
> 	    !subflow->rx_eof && subflow_is_done(sk)) {
> 		subflow->rx_eof = 1;
> 		mptcp_subflow_eof(parent);
> -- 
> 2.26.2

Thanks Paolo. This looks ok to squash.

--
Mat Martineau
Intel

Matthieu Baerts Nov. 9, 2020, 5:32 p.m. UTC | #2
Hi Paolo, Mat,

On 06/11/2020 22:26, Paolo Abeni wrote:
> The msk status for fallback sockets is not tracked correctly.
> We must switch the msk to TCP_CLOSE only after the subflow has
> moved there, or the worker will not run and pending mptcp data
> will not be spooled after close().

Thank you for the patch and the review!

- a9dd786ca633: "squashed" in "mptcp: refactor shutdown and close"
- Results: 3f3a27631edd..42df929e2c92

Tests + export are in progress!

Cheers,
Matt

Patch

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index a6bd06c724d5..4abec5f74f1c 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -764,8 +764,10 @@  static void mptcp_check_for_eof(struct mptcp_sock *msk)
 
 	mptcp_for_each_subflow(msk, subflow)
 		receivers += !subflow->rx_eof;
+	if (receivers)
+		return;
 
-	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
+	if (!(sk->sk_shutdown & RCV_SHUTDOWN)) {
 		/* hopefully temporary hack: propagate shutdown status
 		 * to msk, when all subflows agree on it
 		 */
@@ -775,6 +777,8 @@  static void mptcp_check_for_eof(struct mptcp_sock *msk)
 		set_bit(MPTCP_DATA_READY, &msk->flags);
 		sk->sk_data_ready(sk);
 	}
+	if (sk->sk_state != TCP_CLOSE && sk->sk_shutdown == SHUTDOWN_MASK)
+		inet_sk_state_store(sk, TCP_CLOSE);
 }
 
 static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
@@ -2157,11 +2161,12 @@  static void mptcp_close(struct sock *sk, long timeout)
 	lock_sock(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;
 
-	if (sk->sk_state == TCP_LISTEN ||
-	    __mptcp_check_fallback((struct mptcp_sock *)sk)) {
+	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
 		inet_sk_state_store(sk, TCP_CLOSE);
 		goto cleanup;
-	} else if (sk->sk_state == TCP_CLOSE) {
+	} else if (__mptcp_check_fallback((struct mptcp_sock *)sk)) {
+		if (!mptcp_send_head(sk))
+			inet_sk_state_store(sk, TCP_CLOSE);
 		goto cleanup;
 	}
 
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 1e9a72af67dc..fd356dc9d580 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1282,7 +1282,6 @@  static void subflow_state_change(struct sock *sk)
 		mptcp_data_ready(parent, sk);
 
 	if (__mptcp_check_fallback(mptcp_sk(parent)) &&
-	    !(parent->sk_shutdown & RCV_SHUTDOWN) &&
 	    !subflow->rx_eof && subflow_is_done(sk)) {
 		subflow->rx_eof = 1;
 		mptcp_subflow_eof(parent);
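
Taken together, the three hunks make the fallback EOF path converge on TCP_CLOSE: subflow_state_change() now reports rx_eof even when the parent already has RCV_SHUTDOWN set, mptcp_subflow_eof() schedules the worker, and the new tail of mptcp_check_for_eof() moves the msk to TCP_CLOSE once both shutdown directions are set. The standalone C model below sketches only that last step; struct msk_model, model_check_for_eof() and the M_* constants are simplified stand-ins invented for this example, not kernel definitions.

/* Standalone model (not kernel code) of the new tail of
 * mptcp_check_for_eof(): once no subflow is left without EOF, the read
 * side of the msk is shut down, and when both directions are shut the
 * socket is moved to TCP_CLOSE instead of lingering after close().
 */
#include <stdio.h>

#define M_RCV_SHUTDOWN	1
#define M_SEND_SHUTDOWN	2
#define M_SHUTDOWN_MASK	(M_RCV_SHUTDOWN | M_SEND_SHUTDOWN)

enum model_state { M_ESTABLISHED, M_CLOSE };

struct msk_model {
	enum model_state state;
	int sk_shutdown;
	int receivers;		/* subflows that have not yet hit EOF */
};

static void model_check_for_eof(struct msk_model *msk)
{
	if (msk->receivers)
		return;
	msk->sk_shutdown |= M_RCV_SHUTDOWN;	/* propagate EOF to the msk */
	if (msk->state != M_CLOSE && msk->sk_shutdown == M_SHUTDOWN_MASK)
		msk->state = M_CLOSE;		/* both directions shut */
}

int main(void)
{
	/* close() already set the send-side shutdown; the last subflow EOF
	 * completes the mask and the msk finally reaches TCP_CLOSE.
	 */
	struct msk_model msk = { M_ESTABLISHED, M_SEND_SHUTDOWN, 0 };

	model_check_for_eof(&msk);
	printf("msk state: %s\n",
	       msk.state == M_CLOSE ? "TCP_CLOSE" : "TCP_ESTABLISHED");
	return 0;
}

Run as a plain userspace program it prints TCP_CLOSE, matching the state the msk is expected to reach after a fallback close().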