@@ -1740,9 +1740,16 @@ int tcp_mmap_hook(struct socket *sock, enum mmap_hook mode)
*/
return 0;
}
- /* TODO: Move here the stuff that can been done after
- * current->mm->mmap_sem has been released.
- */
+ if (mode == MMAP_HOOK_COMMIT) {
+ u32 offset;
+
+ tcp_rcv_space_adjust(sk);
+
+ /* Clean up data we have read: This will do ACK frames. */
+ tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset);
+
+ tcp_cleanup_rbuf(sk, PAGE_SIZE);
+ }
release_sock(sk);
return 0;
}
@@ -1843,13 +1850,8 @@ int tcp_mmap(struct file *file, struct socket *sock,
if (ret)
goto out;
}
- /* operation is complete, we can 'consume' all skbs */
+ /* operation is complete, skbs will be freed from tcp_mmap_hook() */
tp->copied_seq = seq;
- tcp_rcv_space_adjust(sk);
-
- /* Clean up data we have read: This will do ACK frames. */
- tcp_recv_skb(sk, seq, &offset);
- tcp_cleanup_rbuf(sk, size);
ret = 0;
out:
Freeing all skbs and sending ACKs is time consuming. This is currently done while both current->mm->mmap_sem and the socket lock are held, in tcp_mmap(). Thanks to the mmap_hook infrastructure, we can perform the cleanup after current->mm->mmap_sem has been released, thus allowing other threads to perform mm operations without delay. Note that the preparation work (building the array of page pointers) can also be done from tcp_mmap_hook() while mmap_sem has not been taken yet, but this is another independent change. Signed-off-by: Eric Dumazet <edumazet@google.com> --- net/ipv4/tcp.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-)