
[nf-next,1/3] netfilter: restart nf ct cleanup if hash resize happen

Message ID: 1495345149-57674-2-git-send-email-zlpnobody@163.com
State: Changes Requested
Delegated to: Pablo Neira

Commit Message

Liping Zhang May 21, 2017, 5:39 a.m. UTC
From: Liping Zhang <zlpnobody@gmail.com>

Similar to commit 474803d37e7f ("netfilter: cttimeout: unlink timeout
obj again when hash resize happen"), when a hash resize happens we should
restart the cleanup work from bucket 0, so that we never miss the conntrack
entries we are interested in. This is important for module removal.

Signed-off-by: Liping Zhang <zlpnobody@gmail.com>
---
 net/netfilter/nf_conntrack_core.c | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)
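
In short: nf_ct_iterate_cleanup() snapshots nf_conntrack_htable_size into a
new hsize variable before the walk, and get_next_corpse() re-checks that
snapshot against the live value while holding the bucket lock; on a mismatch
it adopts the new size and rewinds to bucket 0. A minimal sketch of the
pattern, distilled from the patch below rather than quoting the full
functions:

	/* Caller: remember the table size the walk started with. */
	hsize = nf_conntrack_htable_size;
	while ((ct = get_next_corpse(net, iter, data, &bucket, &hsize)) != NULL) {
		nf_ct_delete(ct, portid, report);
		nf_ct_put(ct);
	}

	/* In get_next_corpse(), under nf_conntrack_lock(lockp): a resize
	 * replaced nf_conntrack_hash, so the old bucket index could skip
	 * entries; adopt the new size and restart the scan at bucket 0.
	 */
	if (*hsize != nf_conntrack_htable_size) {
		*hsize = nf_conntrack_htable_size;
		*bucket = 0;
	}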

Comments

Florian Westphal May 21, 2017, 8:09 a.m. UTC | #1
Liping Zhang <zlpnobody@163.com> wrote:
> From: Liping Zhang <zlpnobody@gmail.com>
> 
> Similar to commit 474803d37e7f ("netfilter: cttimeout: unlink timeout
> obj again when hash resize happen"), when a hash resize happens we should
> restart the cleanup work from bucket 0, so that we never miss the conntrack
> entries we are interested in. This is important for module removal.

Right.  However, I suggest using the seqcount for this, as done here:

https://git.breakpoint.cc/cgit/fw/nf-next.git/commit/?h=nfct_iterate_cleanup_15&id=b4f7c51617df8c79e0f28d03c6ebc2c157b1fecd
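
Roughly, that variant brackets the whole walk with a read of the
nf_conntrack_generation seqcount (which the resize path bumps) and redoes
the walk from bucket 0 whenever the sequence changed. A sketch of the idea,
assuming the existing nf_conntrack_generation seqcount and keeping the
current get_next_corpse() signature; this is not the exact code from the
branch above:

	unsigned int bucket = 0, sequence;
	struct nf_conn *ct;

	for (;;) {
		sequence = read_seqcount_begin(&nf_conntrack_generation);

		while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
			nf_ct_delete(ct, portid, report);
			nf_ct_put(ct);
			cond_resched();
		}

		if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
			break;

		/* A resize replaced the hash table while we were walking it,
		 * so redo the walk from bucket 0 to avoid missing entries.
		 */
		bucket = 0;
	}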


Patch

diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e847dba..dec4c2a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1587,7 +1587,7 @@  static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
 /* Bring out ya dead! */
 static struct nf_conn *
 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
-		void *data, unsigned int *bucket)
+		void *data, unsigned int *bucket, unsigned int *hsize)
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
@@ -1595,20 +1595,28 @@  get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 	int cpu;
 	spinlock_t *lockp;
 
-	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+	for (; *bucket < *hsize; (*bucket)++) {
 		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
 		local_bh_disable();
 		nf_conntrack_lock(lockp);
-		if (*bucket < nf_conntrack_htable_size) {
-			hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
-				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
-					continue;
-				ct = nf_ct_tuplehash_to_ctrack(h);
-				if (net_eq(nf_ct_net(ct), net) &&
-				    iter(ct, data))
-					goto found;
-			}
+
+		/* nf conntrack hash resize happened. */
+		if (*hsize != nf_conntrack_htable_size) {
+			*hsize = nf_conntrack_htable_size;
+			*bucket = 0;
+			goto cont;
+		}
+
+		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket],
+					   hnnode) {
+			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+				continue;
+			ct = nf_ct_tuplehash_to_ctrack(h);
+			if (net_eq(nf_ct_net(ct), net) &&
+			    iter(ct, data))
+				goto found;
 		}
+cont:
 		spin_unlock(lockp);
 		local_bh_enable();
 		cond_resched();
@@ -1640,13 +1648,16 @@  void nf_ct_iterate_cleanup(struct net *net,
 {
 	struct nf_conn *ct;
 	unsigned int bucket = 0;
+	unsigned int hsize;
 
 	might_sleep();
 
 	if (atomic_read(&net->ct.count) == 0)
 		return;
 
-	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
+	hsize = nf_conntrack_htable_size;
+	while ((ct = get_next_corpse(net, iter, data, &bucket,
+				     &hsize)) != NULL) {
 		/* Time to push up daises... */
 
 		nf_ct_delete(ct, portid, report);