diff mbox series

[RFC,mptcp-next,3/3] mptcp: try to clean data inplace if possible

Message ID 20200905103324.27066-4-fw@strlen.de
State RFC, archived
Commit bef7797391292b1210d0ecd4b4a90483e5747e05
Delegated to: Florian Westphal
Headers show
Series mptcp: rework poll+nospace handling | expand

Commit Message

Florian Westphal Sept. 5, 2020, 10:33 a.m. UTC
In most cases the worker won't be scheduled because socket
has enough space and is established.

If the mptcp retransmit queue needs to be cleaned up, it's possible to try
to acquire the mptcp socket lock anyway.  This allows us to avoid the work
queue unless the socket is currently owned.

Signed-off-by: Florian Westphal <fw@strlen.de>
---
 net/mptcp/protocol.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)
diff mbox series

Patch

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index bc5231d5b06d..d43f7e7125a1 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -41,6 +41,8 @@  struct mptcp_skb_cb {
 
 static struct percpu_counter mptcp_sockets_allocated;
 
+static void mptcp_clean_una(struct sock *sk);
+
 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
  * completed yet or has failed, return the subflow socket.
  * Otherwise return NULL.
@@ -681,13 +683,30 @@  static void mptcp_reset_timer(struct sock *sk)
 void mptcp_data_acked(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
+	bool fast = false;
 
 	mptcp_reset_timer(sk);
 
 	if ((test_bit(MPTCP_NOSPACE, &msk->flags) ||
-	    (inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
-	    schedule_work(&mptcp_sk(sk)->work))
-		sock_hold(sk);
+	    (inet_sk_state_load(sk) != TCP_ESTABLISHED))) {
+		if (READ_ONCE(sk->sk_lock.owned))
+			goto slowpath;
+
+		if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
+			goto slowpath;
+
+		if (!READ_ONCE(sk->sk_lock.owned)) {
+			mptcp_clean_una(sk);
+			fast = true;
+		}
+
+		spin_unlock_bh(&sk->sk_lock.slock);
+		if (fast)
+			return;
+	slowpath:
+		if (schedule_work(&mptcp_sk(sk)->work))
+			sock_hold(sk);
+	}
 }
 
 void mptcp_subflow_eof(struct sock *sk)