--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -292,8 +292,13 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	if (err)
 		goto out;
 
+	skb_orphan(skb);
+
+	spin_lock_irqsave(&list->lock, flags);
+
 	if (!sk_rmem_schedule(sk, skb->truesize)) {
 		err = -ENOBUFS;
+		spin_unlock_irqrestore(&list->lock, flags);
 		goto out;
 	}
 
@@ -307,7 +312,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	skb_len = skb->len;
 
-	spin_lock_irqsave(&list->lock, flags);
 	skb->dropcount = atomic_read(&sk->sk_drops);
 	__skb_queue_tail(list, skb);
 	spin_unlock_irqrestore(&list->lock, flags);
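Taken together, the two sock_queue_rcv_skb() hunks widen the section covered by list->lock: the skb is orphaned and the queue lock is taken before sk_rmem_schedule(), so the receive-memory charge and the queueing form one atomic section, and the -ENOBUFS path now has to unlock before jumping out. A sketch of how the function reads with both hunks applied (the elided lines are assumed unchanged from the unpatched function):

	skb_orphan(skb);	/* release any previous owner before charging sk */

	spin_lock_irqsave(&list->lock, flags);

	/* charge receive memory and queue the skb under one lock hold */
	if (!sk_rmem_schedule(sk, skb->truesize)) {
		err = -ENOBUFS;
		spin_unlock_irqrestore(&list->lock, flags);
		goto out;
	}

	/* ... owner and length bookkeeping unchanged ... */

	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);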
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -855,29 +855,21 @@ out:
  */
 static unsigned int first_packet_length(struct sock *sk)
 {
-	struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
+	struct sk_buff_head *rcvq = &sk->sk_receive_queue;
 	struct sk_buff *skb;
 	unsigned int res;
 
-	__skb_queue_head_init(&list_kill);
-
 	spin_lock_bh(&rcvq->lock);
 	while ((skb = skb_peek(rcvq)) != NULL &&
 	       udp_lib_checksum_complete(skb)) {
 		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
				 IS_UDPLITE(sk));
 		__skb_unlink(skb, rcvq);
-		__skb_queue_tail(&list_kill, skb);
+		skb_kill_datagram(sk, skb, 0);
 	}
 	res = skb ? skb->len : 0;
 	spin_unlock_bh(&rcvq->lock);
 
-	if (!skb_queue_empty(&list_kill)) {
-		lock_sock(sk);
-		__skb_queue_purge(&list_kill);
-		sk_mem_reclaim_partial(sk);
-		release_sock(sk);
-	}
 	return res;
 }
 
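With the private list_kill queue gone, corrupted datagrams are freed via skb_kill_datagram() directly while the receive-queue lock is held, so first_packet_length() no longer needs to take the socket lock at all. Reconstructed from the hunk above, the whole function now reads:

	static unsigned int first_packet_length(struct sock *sk)
	{
		struct sk_buff_head *rcvq = &sk->sk_receive_queue;
		struct sk_buff *skb;
		unsigned int res;

		spin_lock_bh(&rcvq->lock);
		/* discard datagrams with bad checksums until a good one is found */
		while ((skb = skb_peek(rcvq)) != NULL &&
		       udp_lib_checksum_complete(skb)) {
			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			__skb_unlink(skb, rcvq);
			skb_kill_datagram(sk, skb, 0);
		}
		res = skb ? skb->len : 0;
		spin_unlock_bh(&rcvq->lock);

		return res;
	}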
@@ -1003,17 +995,17 @@ try_again:
 	err = ulen;
 
 out_free:
-	lock_sock(sk);
+	spin_lock_bh(&sk->sk_receive_queue.lock);
 	skb_free_datagram(sk, skb);
-	release_sock(sk);
+	spin_unlock_bh(&sk->sk_receive_queue.lock);
 out:
 	return err;
 
 csum_copy_err:
-	lock_sock(sk);
+	spin_lock_bh(&sk->sk_receive_queue.lock);
 	if (!skb_kill_datagram(sk, skb, flags))
-		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	release_sock(sk);
+		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	spin_unlock_bh(&sk->sk_receive_queue.lock);
 
 	if (noblock)
 		return -EAGAIN;
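udp_recvmsg() makes the same substitution on its slow paths: the heavyweight lock_sock()/release_sock() pair is replaced by the receive queue's own spinlock. Because spin_lock_bh() leaves bottom halves disabled inside the section, the UDP_MIB_INERRORS bump switches from the _USER to the _BH flavor of the SNMP macros. Roughly, and simplified from include/net/snmp.h of this era, the two flavors update separate per-cpu counter sets (readers sum both), and the _BH form is only valid where BHs are already off, which spin_lock_bh() now guarantees:

	#define SNMP_INC_STATS_BH(mib, field)	\
		(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
	#define SNMP_INC_STATS_USER(mib, field)	\
		do {	\
			per_cpu_ptr(mib[1], get_cpu())->mibs[field]++;	\
			put_cpu();	\
		} while (0)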
@@ -1095,7 +1087,6 @@ drop:
 int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
-	int rc;
 	int is_udplite = IS_UDPLITE(sk);
 
 	/*
@@ -1175,16 +1166,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 	}
 
-	rc = 0;
-
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		rc = __udp_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
-	bh_unlock_sock(sk);
-
-	return rc;
+	return __udp_queue_rcv_skb(sk, skb);
 
 drop:
 	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
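With sock_queue_rcv_skb() now serialized by the receive-queue lock alone, udp_queue_rcv_skb() has no reason to check sock_owned_by_user() or park packets on the socket backlog, and the rc variable removed in the earlier hunk goes with it. The delivery step collapses from the backlog dance to a direct call; a before/after sketch:

	/* before: deliver only when no process-context user holds the
	 * socket lock, otherwise queue to the backlog for release_sock()
	 * to replay later
	 */
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = __udp_queue_rcv_skb(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
	return rc;

	/* after: the receive queue's spinlock is the only serialization
	 * required, so deliver unconditionally
	 */
	return __udp_queue_rcv_skb(sk, skb);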
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -288,23 +288,23 @@ try_again:
 	err = ulen;
 
 out_free:
-	lock_sock(sk);
+	spin_lock_bh(&sk->sk_receive_queue.lock);
 	skb_free_datagram(sk, skb);
-	release_sock(sk);
+	spin_unlock_bh(&sk->sk_receive_queue.lock);
 out:
 	return err;
 
 csum_copy_err:
-	lock_sock(sk);
+	spin_lock_bh(&sk->sk_receive_queue.lock);
 	if (!skb_kill_datagram(sk, skb, flags)) {
 		if (is_udp4)
-			UDP_INC_STATS_USER(sock_net(sk),
+			UDP_INC_STATS_BH(sock_net(sk),
					UDP_MIB_INERRORS, is_udplite);
 		else
-			UDP6_INC_STATS_USER(sock_net(sk),
+			UDP6_INC_STATS_BH(sock_net(sk),
					UDP_MIB_INERRORS, is_udplite);
 	}
-	release_sock(sk);
+	spin_unlock_bh(&sk->sk_receive_queue.lock);
 
 	if (flags & MSG_DONTWAIT)
 		return -EAGAIN;
@@ -468,21 +468,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	while ((sk2 = udp_v6_mcast_next(net, sk_nulls_next(sk2), uh->dest, daddr,
					uh->source, saddr, dif))) {
 		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
-		if (buff) {
-			bh_lock_sock(sk2);
-			if (!sock_owned_by_user(sk2))
-				udpv6_queue_rcv_skb(sk2, buff);
-			else
-				sk_add_backlog(sk2, buff);
-			bh_unlock_sock(sk2);
-		}
+		if (buff)
+			udpv6_queue_rcv_skb(sk2, buff);
 	}
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		udpv6_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
-	bh_unlock_sock(sk);
+	udpv6_queue_rcv_skb(sk, skb);
 out:
 	spin_unlock(&hslot->lock);
 	return 0;
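In the IPv6 multicast path every additional matching socket receives its own clone of the skb; since delivery no longer depends on the socket lock, each clone can be queued directly while the hash-bucket lock (hslot->lock) is held, and the per-socket backlog branch disappears. The loop after the patch, with comments added:

	while ((sk2 = udp_v6_mcast_next(net, sk_nulls_next(sk2), uh->dest, daddr,
					uh->source, saddr, dif))) {
		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
		/* a failed clone silently skips delivery to this socket */
		if (buff)
			udpv6_queue_rcv_skb(sk2, buff);
	}
	udpv6_queue_rcv_skb(sk, skb);	/* the first match consumes the original */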
@@ -597,12 +586,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
 	/* deliver */
 
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		udpv6_queue_rcv_skb(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
-	bh_unlock_sock(sk);
+	udpv6_queue_rcv_skb(sk, skb);
 	sock_put(sk);
 	return 0;
 
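The unicast IPv6 receive path gets the same treatment as its IPv4 counterpart. Summarizing the locking contract this patch leaves behind (a comment-style recap, not code from the tree):

	/*
	 * UDP receive locking after this patch:
	 *
	 *   softirq (rx path)   : sk_receive_queue.lock, irqsave variant
	 *                         -- sock_queue_rcv_skb()
	 *   process (recvmsg,
	 *            ioctl/poll): sk_receive_queue.lock, _bh variant
	 *                         -- udp_recvmsg(), udpv6_recvmsg(),
	 *                            first_packet_length()
	 *
	 * lock_sock()/release_sock() and the socket backlog are no longer
	 * part of the UDP receive path.
	 */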