@@ -41,6 +41,8 @@ struct mptcp_skb_cb {
 
 static struct percpu_counter mptcp_sockets_allocated;
 
+static void mptcp_clean_una(struct sock *sk);
+
 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
  * completed yet or has failed, return the subflow socket.
  * Otherwise return NULL.
@@ -681,13 +683,30 @@ static void mptcp_reset_timer(struct sock *sk)
 void mptcp_data_acked(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
+	bool fast = false;
 
 	mptcp_reset_timer(sk);
 
 	if ((test_bit(MPTCP_NOSPACE, &msk->flags) ||
-	     (inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
-	    schedule_work(&mptcp_sk(sk)->work))
-		sock_hold(sk);
+	     (inet_sk_state_load(sk) != TCP_ESTABLISHED))) {
+		if (READ_ONCE(sk->sk_lock.owned))
+			goto slowpath;
+
+		if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
+			goto slowpath;
+
+		if (!READ_ONCE(sk->sk_lock.owned)) {
+			mptcp_clean_una(sk);
+			fast = true;
+		}
+
+		spin_unlock_bh(&sk->sk_lock.slock);
+		if (fast)
+			return;
+slowpath:
+		if (schedule_work(&mptcp_sk(sk)->work))
+			sock_hold(sk);
+	}
 }
 
 void mptcp_subflow_eof(struct sock *sk)
In most cases the worker won't be scheduled because the socket has enough
space and is established.  If the mptcp retransmit queue needs to be
cleaned up, it's possible to try to acquire the mptcp socket lock anyway.
This avoids the work queue unless the socket is currently owned.

Signed-off-by: Florian Westphal <fw@strlen.de>
---
 net/mptcp/protocol.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)
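
For anyone who wants to poke at the locking pattern outside the kernel
tree, below is a minimal userspace sketch of the same fast/slow split.
Everything in it is an invented stand-in, not kernel code: fake_sock,
clean_una, defer_work and data_acked are hypothetical names,
pthread_mutex_trylock plays the role of spin_trylock_bh, and an atomic
flag stands in for sk->sk_lock.owned.  As in the kernel, the sketch
assumes the owner flag is only flipped while the spinlock is held, which
is what makes the re-check after a successful trylock stable.

/* Userspace sketch of the try-lock fast path; build with: cc -pthread sketch.c
 * All names here are made up for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	pthread_mutex_t slock;	/* stand-in for sk->sk_lock.slock */
	atomic_bool owned;	/* stand-in for sk->sk_lock.owned; only
				 * flipped while slock is held */
};

static void clean_una(struct fake_sock *sk)
{
	puts("fast path: cleaned the retransmit queue inline");
}

static void defer_work(struct fake_sock *sk)
{
	puts("slow path: scheduled the worker instead");
}

/* Mirrors the control flow the patch adds to mptcp_data_acked(). */
static void data_acked(struct fake_sock *sk)
{
	bool fast = false;

	/* Cheap lockless peek: if someone owns the socket, don't even
	 * touch the lock, go straight to the deferred path.
	 */
	if (atomic_load(&sk->owned))
		goto slowpath;

	if (pthread_mutex_trylock(&sk->slock) != 0)
		goto slowpath;

	/* owned can only change while slock is held, so holding the
	 * lock makes this re-check stable: the cleanup is safe now.
	 */
	if (!atomic_load(&sk->owned)) {
		clean_una(sk);
		fast = true;
	}

	pthread_mutex_unlock(&sk->slock);
	if (fast)
		return;
slowpath:
	defer_work(sk);
}

int main(void)
{
	static struct fake_sock sk = {
		.slock = PTHREAD_MUTEX_INITIALIZER,
	};

	data_acked(&sk);		/* lock is free: fast path */
	atomic_store(&sk.owned, true);
	data_acked(&sk);		/* lock owned: worker path */
	return 0;
}

The early lockless check keeps the contended case from touching the
spinlock at all; only when both checks pass does the cleanup run inline
instead of being deferred to the worker.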