
[net-next,1/2] udp: if the rx queue is full, free the skb in __udp_enqueue_schedule_skb()

Message ID 8ee1d5a0d73c523c2d50dc8b289ee6de1518221f.1524045911.git.pabeni@redhat.com
State Deferred, archived
Delegated to: David Miller
Series UDP: introduce RX skb cache

Commit Message

Paolo Abeni April 18, 2018, 10:22 a.m. UTC
This commit moves the kfree_skb() call on the queue-full event from the
ipv4/ipv6 callers into __udp_enqueue_schedule_skb(). This cleans up the
code and avoids referencing the skb after __udp_enqueue_schedule_skb()
completes, so that we can modify the skb pointer itself inside that
function.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/ipv4/udp.c | 2 +-
 net/ipv6/udp.c | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)
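
For context on the new contract: after this patch, __udp_enqueue_schedule_skb()
owns and frees the skb on the drop path, so callers must not touch (or free)
the skb once the call fails. Below is a minimal userspace sketch of that
ownership transfer; the fake_skb/fake_sock/enqueue names are illustrative
stand-ins, not the real kernel types.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for sk_buff and sock; illustrative only. */
struct fake_skb { int len; };
struct fake_sock { int queued; int limit; };

/*
 * Models the new __udp_enqueue_schedule_skb() contract: on a
 * queue-full error the callee frees the buffer itself, so the
 * caller must not reference skb after a failed call -- which is
 * what lets a later patch change the skb pointer in the callee.
 */
static int enqueue(struct fake_sock *sk, struct fake_skb *skb)
{
	if (sk->queued >= sk->limit) {
		free(skb);		/* callee owns skb on error now */
		return -ENOBUFS;
	}
	sk->queued++;
	return 0;
}

int main(void)
{
	struct fake_sock sk = { .queued = 0, .limit = 1 };
	struct fake_skb *a = malloc(sizeof(*a));
	struct fake_skb *b = malloc(sizeof(*b));

	if (enqueue(&sk, a))
		fprintf(stderr, "drop\n");	/* not reached: queue has room */
	if (enqueue(&sk, b))
		fprintf(stderr, "drop\n");	/* queue full: b already freed, no free here */
	return 0;
}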

Patch

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 24b5c59b1c53..3fb0fbf4977d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1341,6 +1341,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 drop:
 	atomic_inc(&sk->sk_drops);
 	busylock_release(busy);
+	kfree_skb(skb);
 	return err;
 }
 EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
@@ -1802,7 +1803,6 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 					is_udplite);
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-		kfree_skb(skb);
 		trace_udp_fail_queue_rcv_skb(rc, sk);
 		return -1;
 	}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6861ed479469..c113222f7670 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -532,7 +532,6 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			UDP6_INC_STATS(sock_net(sk),
 					 UDP_MIB_RCVBUFERRORS, is_udplite);
 		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-		kfree_skb(skb);
 		return -1;
 	}