
[v2,net-next,3/5] mptcp: do not queue excessive data on subflows

Message ID 0ee2b775b36e79bbd46828d8fd95b0b4c030928b.1611153172.git.pabeni@redhat.com
State Deferred, archived
Series mptcp: re-enable sndbuf autotune

Commit Message

Paolo Abeni Jan. 20, 2021, 2:39 p.m. UTC
The current packet scheduler can enqueue up to a full send
buffer (sndbuf) worth of data on each subflow. If the send
buffer is large and the subflows are not symmetric, data
committed to a slow subflow sits queued there while faster
subflows run dry, leading to suboptimal aggregate bandwidth
utilization.

Limit the amount of queued data to the maximum send window.
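
For illustration only, not part of the patch: a minimal
userspace sketch of the capping idea, assuming hypothetical
names (subflow_info, pick_burst) and an illustrative burst
constant. In the kernel the scheduler reads
tcp_sk(ssk)->snd_wnd directly inside mptcp_subflow_get_send().

  #include <stdint.h>
  #include <stdio.h>

  #define MPTCP_SEND_BURST_SIZE (64 * 1024) /* illustrative value */

  struct subflow_info {
          uint32_t snd_wnd; /* send window advertised by the peer */
          int      wspace;  /* free space in the local send buffer */
  };

  /* Burst quota for a subflow: bounded by snd_wnd, not by the
   * (possibly much larger) local send buffer free space.
   */
  static int pick_burst(const struct subflow_info *sf)
  {
          return sf->snd_wnd < MPTCP_SEND_BURST_SIZE ?
                 (int)sf->snd_wnd : MPTCP_SEND_BURST_SIZE;
  }

  int main(void)
  {
          /* A slow subflow: large local buffer, small window. */
          struct subflow_info slow = { .snd_wnd = 4096, .wspace = 1 << 20 };

          /* Capping by wspace would allow up to 1 MiB to be queued
           * here; capping by the window allows only 4 KiB.
           */
          printf("window-capped burst = %d bytes (local wspace was %d)\n",
                 pick_burst(&slow), slow.wspace);
          return 0;
  }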

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/mptcp/protocol.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

Patch

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index d07e60330df56..e741201acc98f 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1389,7 +1389,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 			continue;
 
 		nr_active += !subflow->backup;
-		if (!sk_stream_memory_free(subflow->tcp_sock))
+		if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd)
 			continue;
 
 		pace = READ_ONCE(ssk->sk_pacing_rate);
@@ -1415,7 +1415,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 	if (send_info[0].ssk) {
 		msk->last_snd = send_info[0].ssk;
 		msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
-				       sk_stream_wspace(msk->last_snd));
+				       tcp_sk(msk->last_snd)->snd_wnd);
 		return msk->last_snd;
 	}
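
The two hunks work together: the first skips subflows whose
TCP-level send window is zero when picking a transmit subflow,
and the second caps the per-subflow burst at snd_wnd instead of
the local free send-buffer space reported by sk_stream_wspace().
Since snd_wnd tracks the window advertised by the peer, the
scheduler stops queuing data a subflow cannot actually transmit,
rather than filling that subflow's send buffer.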