
[net-next] tcp: try to keep packet if SYN_RCV race is lost

Message ID 1518531252.3715.178.camel@gmail.com
State Accepted, archived
Delegated to: David Miller
Series: [net-next] tcp: try to keep packet if SYN_RCV race is lost

Commit Message

Eric Dumazet Feb. 13, 2018, 2:14 p.m. UTC
From: Eric Dumazet <edumazet@google.com>

배석진 reported that in some situations, packets for a given 5-tuple
end up being processed by different CPUs.

This involves RPS, and fragmentation.
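
To see why, recall how RPS steers packets: a flow hash picks the CPU.
The first fragment of a datagram still carries the TCP header, so its
hash covers the full 5-tuple; later fragments expose no L4 header and
hash on addresses only, often landing on a different CPU. A minimal
user-space sketch of the effect (toy_hash() is an illustrative
stand-in, not the kernel's flow dissector):

/* Illustrative only: why fragments of one TCP flow can be steered
 * to different CPUs by RPS.  The real logic lives in
 * __skb_flow_dissect()/skb_get_hash().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_hash(uint32_t saddr, uint32_t daddr, uint32_t ports)
{
	uint32_t h = saddr ^ daddr ^ ports;	/* jhash-style stand-in */

	h ^= h >> 16;
	h *= 0x45d9f3bu;
	h ^= h >> 16;
	return h;
}

int main(void)
{
	uint32_t saddr = 0xc0a80001, daddr = 0xc0a80002;  /* 192.168.0.1/.2 */
	uint32_t ports = (12345u << 16) | 443;
	unsigned int ncpus = 8;

	/* First fragment: TCP header present, full 5-tuple hash. */
	printf("frag 0 -> cpu %u\n", toy_hash(saddr, daddr, ports) % ncpus);

	/* Later fragments: no TCP header, hash degrades to addresses only. */
	printf("frag N -> cpu %u\n", toy_hash(saddr, daddr, 0) % ncpus);
	return 0;
}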

배석진 is seeing packet drops when a SYN_RECV request socket is
moved into ESTABLISHED state; other states are protected by the socket lock.

This is caused by a CPU losing the race and simply dropping the packet
instead of trying to deliver it to the newly created socket.

Since this seems to occur frequently, we can do better and perform
a second lookup.

Note that all needed memory barriers are already in the existing code,
thanks to the spin_lock()/spin_unlock() pair in inet_ehash_insert()
and reqsk_put(). The second lookup must find the new socket,
unless it has already been accepted and closed by another CPU.
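
The retry pattern, reduced to user space (a sketch of the idea only,
not the kernel code; the winner's unlock pairs with the loser's lock
to publish the new socket):

/* User-space sketch of the retry-after-lost-race idea.  The thread
 * that loses the promotion race performs a second lookup instead of
 * dropping.  Build with -lpthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool promoted;	/* req -> full socket transition done */

static bool try_promote(void)
{
	bool won;

	pthread_mutex_lock(&lock);
	won = !promoted;
	promoted = true;
	pthread_mutex_unlock(&lock);	/* publishes the new state */
	return won;
}

static void *deliver(void *arg)
{
	if (try_promote()) {
		printf("%s: won race, created full socket\n", (char *)arg);
	} else {
		/* Old behavior: drop.  New behavior: second lookup;
		 * the lock/unlock pair guarantees we see the socket.
		 */
		printf("%s: lost race, retry lookup -> socket\n", (char *)arg);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, deliver, "cpu A");
	pthread_create(&b, NULL, deliver, "cpu B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}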

Note that the fragmentation could have been avoided in the first place
by use of a correct TCP MSS option in the SYN{ACK} packet, but this
does not mean we cannot be more robust.
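
For reference, such an MSS clamp can also be applied from user space
with the standard TCP_MAXSEG socket option before connect(); a minimal
sketch (1400 is an arbitrary example value, not a recommendation):

/* Minimal sketch: clamp the MSS advertised in our SYN so negotiated
 * segments fit the path MTU and are never fragmented.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int mss = 1400;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) < 0)
		perror("setsockopt(TCP_MAXSEG)");
	/* ... connect(fd, ...) would now advertise the clamped MSS ... */
	close(fd);
	return 0;
}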

Many thanks to 배석진 for a very detailed analysis.

Reported-by: 배석진 <soukjin.bae@samsung.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 include/net/tcp.h        |    3 ++-
 net/ipv4/tcp_input.c     |    4 +++-
 net/ipv4/tcp_ipv4.c      |   13 ++++++++++++-
 net/ipv4/tcp_minisocks.c |    3 ++-
 net/ipv6/tcp_ipv6.c      |   13 ++++++++++++-
 5 files changed, 31 insertions(+), 5 deletions(-)

Comments

David Miller Feb. 14, 2018, 7:22 p.m. UTC | #1
From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Tue, 13 Feb 2018 06:14:12 -0800

Applied, thanks Eric.

Patch

diff --git a/include/net/tcp.h b/include/net/tcp.h
index e3fc667f9ac2601d8f9cb50261a7948c41709664..92b06c6e7732ad7c61b580427fc085fa0dff1063 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -374,7 +374,8 @@  enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 					      struct sk_buff *skb,
 					      const struct tcphdr *th);
 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
-			   struct request_sock *req, bool fastopen);
+			   struct request_sock *req, bool fastopen,
+			   bool *lost_race);
 int tcp_child_process(struct sock *parent, struct sock *child,
 		      struct sk_buff *skb);
 void tcp_enter_loss(struct sock *sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 575d3c1fb6e835e225834ca45f58b74ea29e000b..a6b48f6253e3f91d396bf6b03f06be285ba1006c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5870,10 +5870,12 @@  int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 	tp->rx_opt.saw_tstamp = 0;
 	req = tp->fastopen_rsk;
 	if (req) {
+		bool req_stolen;
+
 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
 		    sk->sk_state != TCP_FIN_WAIT1);
 
-		if (!tcp_check_req(sk, skb, req, true))
+		if (!tcp_check_req(sk, skb, req, true, &req_stolen))
 			goto discard;
 	}
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f8ad397e285e9b8db0b04f8abc30a42f22294ef9..6d7e0c061dae14cdea66af73a77b5b7232085cd3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1672,6 +1672,7 @@  int tcp_v4_rcv(struct sk_buff *skb)
 
 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 		struct request_sock *req = inet_reqsk(sk);
+		bool req_stolen = false;
 		struct sock *nsk;
 
 		sk = req->rsk_listener;
@@ -1694,10 +1695,20 @@  int tcp_v4_rcv(struct sk_buff *skb)
 			th = (const struct tcphdr *)skb->data;
 			iph = ip_hdr(skb);
 			tcp_v4_fill_cb(skb, iph, th);
-			nsk = tcp_check_req(sk, skb, req, false);
+			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
 		}
 		if (!nsk) {
 			reqsk_put(req);
+			if (req_stolen) {
+				/* Another cpu got exclusive access to req
+				 * and created a full blown socket.
+				 * Try to feed this packet to this socket
+				 * instead of discarding it.
+				 */
+				tcp_v4_restore_cb(skb);
+				sock_put(sk);
+				goto lookup;
+			}
 			goto discard_and_relse;
 		}
 		if (nsk == sk) {
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a8384b0c11f8fa589e2ed5311899b62c80a269f8..e7e36433cdb5d9aabb5d194ef6395f3c9e415d56 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -578,7 +578,7 @@  EXPORT_SYMBOL(tcp_create_openreq_child);
 
 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 			   struct request_sock *req,
-			   bool fastopen)
+			   bool fastopen, bool *req_stolen)
 {
 	struct tcp_options_received tmp_opt;
 	struct sock *child;
@@ -785,6 +785,7 @@  struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 
 	sock_rps_save_rxhash(child, skb);
 	tcp_synack_rtt_meas(child, req);
+	*req_stolen = !own_req;
 	return inet_csk_complete_hashdance(sk, child, req, own_req);
 
 listen_overflow:
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 412139f4eccd96923daaea064cd9fb8be13f5916..883df0ad5bfe9d5373c0f7ed37107cdc57959569 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1451,6 +1451,7 @@  static int tcp_v6_rcv(struct sk_buff *skb)
 
 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
 		struct request_sock *req = inet_reqsk(sk);
+		bool req_stolen = false;
 		struct sock *nsk;
 
 		sk = req->rsk_listener;
@@ -1470,10 +1471,20 @@  static int tcp_v6_rcv(struct sk_buff *skb)
 			th = (const struct tcphdr *)skb->data;
 			hdr = ipv6_hdr(skb);
 			tcp_v6_fill_cb(skb, hdr, th);
-			nsk = tcp_check_req(sk, skb, req, false);
+			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
 		}
 		if (!nsk) {
 			reqsk_put(req);
+			if (req_stolen) {
+				/* Another cpu got exclusive access to req
+				 * and created a full blown socket.
+				 * Try to feed this packet to this socket
+				 * instead of discarding it.
+				 */
+				tcp_v6_restore_cb(skb);
+				sock_put(sk);
+				goto lookup;
+			}
 			goto discard_and_relse;
 		}
 		if (nsk == sk) {