Patchwork [05/11] udp: multicast RX should increment SNMP/sk_drops counter in allocation failures

login
register
mail settings
Submitter Paolo Pisati
Date July 11, 2011, 8:17 a.m.
Message ID <1310372268-3840-6-git-send-email-paolo.pisati@canonical.com>
Download mbox | patch
Permalink /patch/104173/
State New
Headers show

Comments

Paolo Pisati - July 11, 2011, 8:17 a.m.
From: Eric Dumazet <eric.dumazet@gmail.com>

BugLink: http://bugs.launchpad.net/bugs/807462

upstream commit f6b8f32ca71406de718391369490f6b1e81fe0bb

(patch necessary for 6ed41136ab20c99d47792b3f19171ab9e523a97f)

When skb_clone() fails, we should increment sk_drops and SNMP counters.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
---
 net/ipv4/udp.c |   12 +++++++++++-
 net/ipv6/udp.c |    8 +++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)

Patch

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6a39004..595144d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1192,12 +1192,22 @@  static void flush_stack(struct sock **stack, unsigned int count,
 {
 	unsigned int i;
 	struct sk_buff *skb1 = NULL;
+	struct sock *sk;
 
 	for (i = 0; i < count; i++) {
+		sk = stack[i];
 		if (likely(skb1 == NULL))
 			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
-		if (skb1 && udp_queue_rcv_skb(stack[i], skb1) <= 0)
+		if (!skb1) {
+			atomic_inc(&sk->sk_drops);
+			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					 IS_UDPLITE(sk));
+			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+					 IS_UDPLITE(sk));
+		}
+
+		if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
 			skb1 = NULL;
 	}
 	if (unlikely(skb1))
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index ede7a73..6fe1846 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -450,14 +450,20 @@  static void flush_stack(struct sock **stack, unsigned int count,
 	for (i = 0; i < count; i++) {
 		skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 
+		sk = stack[i];
 		if (skb1) {
-			sk = stack[i];
 			bh_lock_sock(sk);
 			if (!sock_owned_by_user(sk))
 				udpv6_queue_rcv_skb(sk, skb1);
 			else
 				sk_add_backlog(sk, skb1);
 			bh_unlock_sock(sk);
+		} else {
+			atomic_inc(&sk->sk_drops);
+			UDP6_INC_STATS_BH(sock_net(sk),
+					UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+			UDP6_INC_STATS_BH(sock_net(sk),
+					UDP_MIB_INERRORS, IS_UDPLITE(sk));
 		}
 	}
 }