[net-next,2/3] tcp: avoid integer overflows in tcp_rcv_space_adjust()

Message ID 20171211015504.26551-3-edumazet@google.com
State Accepted, archived
Delegated to: David Miller
Series: tcp: better receiver autotuning

Commit Message

Eric Dumazet Dec. 11, 2017, 1:55 a.m. UTC
When using large tcp_rmem[2] values (I did tests with 500 MB),
I noticed overflows while computing rcvwin.

Let's fix this before the following patch.
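
To make the overflow concrete, here is a small userspace sketch (not the
kernel code; the values chosen for copied, advmss and rcvmem are illustrative
assumptions, not measurements): with roughly 500 MB consumed per RTT, the old
all-int computation of rcvwin / advmss * rcvmem exceeds INT_MAX and wraps
negative, so the subsequent rcvbuf > sk->sk_rcvbuf check fails and autotuning
stops growing the receive buffer. Widening rcvwin to 64 bits, as this patch
does, keeps the intermediate result sane before it is clamped by tcp_rmem[2].

/* Illustrative userspace sketch, not kernel code: the values below are
 * assumptions picked to show the wrap, not measurements.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int copied = 500 * 1024 * 1024;	/* bytes copied to user in one RTT */
	int advmss = 1460;		/* typical Ethernet MSS */
	int rcvmem = 3072;		/* assumed per-MSS skb truesize cost */

	/* Old code path, all int: (copied << 1) + 16 * advmss still fits,
	 * but the later multiply by rcvmem does not.
	 */
	int rcvwin_old = (copied << 1) + 16 * advmss;
	int64_t wide = (int64_t)(rcvwin_old / advmss) * rcvmem;
	int rcvbuf_old = (int)wide;	/* simulates the 32-bit wrap without UB */

	/* Patched code path: rcvwin is u64, divide first, then multiply. */
	uint64_t rcvwin_new = ((uint64_t)copied << 1) + 16 * advmss;
	uint64_t rcvbuf_new = (rcvwin_new / advmss) * rcvmem;

	printf("int math : %d\n", rcvbuf_old);		/* negative: wrapped */
	printf("u64 math : %" PRIu64 "\n", rcvbuf_new);	/* ~2.2 GB before clamping */
	return 0;
}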

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Wei Wang <weiwan@google.com>
---
 include/linux/tcp.h  |  2 +-
 net/ipv4/tcp_input.c | 12 +++++++-----
 2 files changed, 8 insertions(+), 6 deletions(-)

Comments

Neal Cardwell Dec. 11, 2017, 5:47 p.m. UTC | #1
On Sun, Dec 10, 2017 at 8:55 PM, Eric Dumazet <edumazet@google.com> wrote:
> When using large tcp_rmem[2] values (I did tests with 500 MB),
> I noticed overflows while computing rcvwin.
>
> Let's fix this before the following patch.
>
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
> Acked-by: Wei Wang <weiwan@google.com>
> ---

Acked-by: Neal Cardwell <ncardwell@google.com>

Thanks, Eric!

neal

Patch

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ca4a6361389b8a3b268ca5b0f4778662a1f7d315..4f93f0953c411322dc6403af7d1b9b6c3e30bd4f 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -344,7 +344,7 @@  struct tcp_sock {
 
 /* Receiver queue space */
 	struct {
-		int	space;
+		u32	space;
 		u32	seq;
 		u64	time;
 	} rcvq_space;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 746a6773c482f5d419ddc9d7c9d52949cbb74cfb..2900e58738cde0ad1ab4a034b6300876ac276edb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -576,8 +576,8 @@  static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 copied;
 	int time;
-	int copied;
 
 	tcp_mstamp_refresh(tp);
 	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -600,12 +600,13 @@  void tcp_rcv_space_adjust(struct sock *sk)
 
 	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-		int rcvwin, rcvmem, rcvbuf;
+		int rcvmem, rcvbuf;
+		u64 rcvwin;
 
 		/* minimal window to cope with packet losses, assuming
 		 * steady state. Add some cushion because of small variations.
 		 */
-		rcvwin = (copied << 1) + 16 * tp->advmss;
+		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
 		/* If rate increased by 25%,
 		 *	assume slow start, rcvwin = 3 * copied
@@ -625,8 +626,9 @@  void tcp_rcv_space_adjust(struct sock *sk)
 		while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
 			rcvmem += 128;
 
-		rcvbuf = min(rcvwin / tp->advmss * rcvmem,
-			     sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+		do_div(rcvwin, tp->advmss);
+		rcvbuf = min_t(u64, rcvwin * rcvmem,
+			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
 		if (rcvbuf > sk->sk_rcvbuf) {
 			sk->sk_rcvbuf = rcvbuf;