@@ -380,6 +380,7 @@ struct sock {
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
int sk_sndbuf;
+ int sk_effective_sndbuf;
struct sk_buff_head sk_write_queue;
kmemcheck_bitfield_begin(flags);
unsigned int sk_shutdown : 2,
@@ -779,6 +780,14 @@ static inline bool sk_acceptq_is_full(const struct sock *sk)
return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
+static inline void sk_set_effective_sndbuf(struct sock *sk) /* snapshot the send-buffer limit when allocation fails under memory pressure */
+{
+ if (sk->sk_wmem_queued > sk->sk_sndbuf) /* already over the configured limit: cap at sk_sndbuf */
+ sk->sk_effective_sndbuf = sk->sk_sndbuf;
+ else
+ sk->sk_effective_sndbuf = sk->sk_wmem_queued; /* freeze at bytes queued now; NOTE(review): stores 0 when nothing is queued, which sk_stream_wspace treats as "unset" — confirm intended */
+}
+
/*
* Compute minimal free write space needed to queue new packets.
*/
@@ -789,6 +798,9 @@ static inline int sk_stream_min_wspace(const struct sock *sk)
static inline int sk_stream_wspace(const struct sock *sk)
{
+ if (sk->sk_effective_sndbuf) /* nonzero: limit was frozen by sk_set_effective_sndbuf on memory pressure; cleared (0) in sk_stream_write_space */
+ return sk->sk_effective_sndbuf - sk->sk_wmem_queued;
+
 return sk->sk_sndbuf - sk->sk_wmem_queued;
}
@@ -2309,6 +2309,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_effective_sndbuf = 0;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
@@ -32,6 +32,7 @@ void sk_stream_write_space(struct sock *sk)
if (sk_stream_is_writeable(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
+ sk->sk_effective_sndbuf = 0;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
@@ -845,6 +845,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
sk->sk_prot->enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
}
+ sk_set_effective_sndbuf(sk);
return NULL;
}
@@ -939,9 +940,10 @@ new_segment:
tcp_mark_push(tp, skb);
goto new_segment;
}
- if (!sk_wmem_schedule(sk, copy))
+ if (!sk_wmem_schedule(sk, copy)) {
+ sk_set_effective_sndbuf(sk);
goto wait_for_memory;
-
+ }
if (can_coalesce) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
@@ -1214,8 +1216,10 @@ new_segment:
copy = min_t(int, copy, pfrag->size - pfrag->offset);
- if (!sk_wmem_schedule(sk, copy))
+ if (!sk_wmem_schedule(sk, copy)) {
+ sk_set_effective_sndbuf(sk);
goto wait_for_memory;
+ }
err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
pfrag->page,