[bpf-next,v2,02/18] sockmap: convert refcnt to an atomic refcnt

Message ID 20180312192314.8039.32041.stgit@john-Precision-Tower-5810
State Changes Requested
Delegated to: BPF Maintainers
Headers show
  • bpf,sockmap: sendmsg/sendfile ULP
Related show

Commit Message

John Fastabend March 12, 2018, 7:23 p.m.
The sockmap refcnt up until now has been wrapped in the
sk_callback_lock(), so it has not actually needed any locking of its
own. The counter itself tracks the lifetime of the psock object.
Sockets in a sockmap have a lifetime that is independent of the
map they are part of. This is possible because a single socket may
be in multiple maps. When this happens we can only release the
psock data associated with the socket when the refcnt reaches
zero. There are three possible delete sock reference decrement
paths: first, through the normal sockmap process, where the user deletes
the socket from the map. Second the map is removed and all sockets
in the map are removed, delete path is similar to case 1. The third
case is an asynchronous socket event such as closing the socket. The
last case handles removing sockets that are no longer available.
For completeness, although inc does not pose any problems in this
patch series, the inc case only happens when a psock is added to a
second map.

Next we plan to add another socket prog type to handle policy and
monitoring on the TX path. When we do this however we will need to
keep a reference count open across the sendmsg/sendpage call and
holding the sk_callback_lock() here (on every send) seems less than
ideal, also it may sleep in cases where we hit memory pressure.
Instead of dealing with these issues in some clever way simply make
the reference counting a refcount_t type and do proper atomic ops.

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: David S. Miller <davem@davemloft.net>
 kernel/bpf/sockmap.c |   23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)


diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index a927e89..051b2242 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -62,8 +62,7 @@  struct smap_psock_map_entry {
 struct smap_psock {
 	struct rcu_head	rcu;
-	/* refcnt is used inside sk_callback_lock */
-	u32 refcnt;
+	refcount_t refcnt;
 	/* datapath variables */
 	struct sk_buff_head rxqueue;
@@ -373,15 +372,13 @@  static void smap_destroy_psock(struct rcu_head *rcu)
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
-	psock->refcnt--;
-	if (psock->refcnt)
-		return;
-	tcp_cleanup_ulp(sock);
-	smap_stop_sock(psock, sock);
-	clear_bit(SMAP_TX_RUNNING, &psock->state);
-	rcu_assign_sk_user_data(sock, NULL);
-	call_rcu_sched(&psock->rcu, smap_destroy_psock);
+	if (refcount_dec_and_test(&psock->refcnt)) {
+		tcp_cleanup_ulp(sock);
+		smap_stop_sock(psock, sock);
+		clear_bit(SMAP_TX_RUNNING, &psock->state);
+		rcu_assign_sk_user_data(sock, NULL);
+		call_rcu_sched(&psock->rcu, smap_destroy_psock);
+	}
 static int smap_parse_func_strparser(struct strparser *strp,
@@ -511,7 +508,7 @@  static struct smap_psock *smap_init_psock(struct sock *sock,
 	INIT_WORK(&psock->tx_work, smap_tx_work);
 	INIT_WORK(&psock->gc_work, smap_gc_work);
-	psock->refcnt = 1;
+	refcount_set(&psock->refcnt, 1);
 	rcu_assign_sk_user_data(sock, psock);
@@ -772,7 +769,7 @@  static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
 			err = -EBUSY;
 			goto out_progs;
-		psock->refcnt++;
+		refcount_inc(&psock->refcnt);
 	} else {
 		psock = smap_init_psock(sock, stab);
 		if (IS_ERR(psock)) {