
[nf-next,3/3] netfilter: flowtable: remove dying bit, use teardown bit instead

Message ID: 20200105202345.242125-3-pablo@netfilter.org
State: Changes Requested
Delegated to: Pablo Neira
Series: [nf-next,1/3] netfilter: flowtable: use atomic bitwise operations for flow flags

Commit Message

Pablo Neira Ayuso Jan. 5, 2020, 8:23 p.m. UTC
The dying bit removes the conntrack entry when the netdev that owns
this flow goes down. Instead, use the teardown mechanism to push the
flow back to conntrack and let the classic software path decide what
to do with it.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 include/net/netfilter/nf_flow_table.h | 5 -----
 net/netfilter/nf_flow_table_core.c    | 8 ++------
 2 files changed, 2 insertions(+), 11 deletions(-)
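
For context, the two paths being consolidated look roughly as follows. This is a sketch against the tree this series applies to, using the atomic flag helpers introduced in patch 1/3; flow_offload_fixup_ct_state() is the pre-existing helper that restores conntrack state for the slow path and is not part of this diff:

/* Old path, removed by this patch: mark the flow as dying so that
 * flow_offload_free() also deletes the conntrack entry outright.
 */
static inline void flow_offload_dead(struct flow_offload *flow)
{
	set_bit(NF_FLOW_DYING_BIT, &flow->flags);
}

/* New path: mark the flow for teardown. The garbage collector then
 * releases the flowtable entry, while the conntrack entry survives
 * and the classic software path decides what to do with it.
 */
void flow_offload_teardown(struct flow_offload *flow)
{
	set_bit(NF_FLOW_TEARDOWN_BIT, &flow->flags);
	flow_offload_fixup_ct_state(flow->ct);
}

With flow_offload_dead() gone, nf_flow_table_do_cleanup() calls flow_offload_teardown() on device-down, so both timeout-driven and device-driven removal converge on the same logic in nf_flow_offload_gc_step().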

Patch

diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 67bbd7b3ad4a..00dfd770c0b9 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -86,7 +86,6 @@ struct flow_offload_tuple_rhash {
 enum nf_flow_flags {
 	NF_FLOW_SNAT_BIT,
 	NF_FLOW_DNAT_BIT,
-	NF_FLOW_DYING_BIT,
 	NF_FLOW_TEARDOWN_BIT,
 	NF_FLOW_HW_BIT,
 	NF_FLOW_HW_DYING_BIT,
@@ -136,10 +135,6 @@ int nf_flow_table_init(struct nf_flowtable *flow_table);
 void nf_flow_table_free(struct nf_flowtable *flow_table);
 
 void flow_offload_teardown(struct flow_offload *flow);
-static inline void flow_offload_dead(struct flow_offload *flow)
-{
-	set_bit(NF_FLOW_DYING_BIT, &flow->flags);
-}
 
 int nf_flow_snat_port(const struct flow_offload *flow,
 		      struct sk_buff *skb, unsigned int thoff,
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 4db29223e176..9dd282cbdc65 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -182,8 +182,6 @@ void flow_offload_free(struct flow_offload *flow)
 	default:
 		break;
 	}
-	if (test_bit(NF_FLOW_DYING_BIT, &flow->flags))
-		nf_ct_delete(flow->ct, 0, 0);
 	nf_ct_put(flow->ct);
 	kfree_rcu(flow, rcu_head);
 }
@@ -300,8 +298,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
 
 	dir = tuplehash->tuple.dir;
 	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
-	if (test_bit(NF_FLOW_DYING_BIT, &flow->flags) ||
-	    test_bit(NF_FLOW_TEARDOWN_BIT, &flow->flags))
+	if (test_bit(NF_FLOW_TEARDOWN_BIT, &flow->flags))
 		return NULL;
 
 	if (unlikely(nf_ct_is_dying(flow->ct)))
@@ -353,7 +350,6 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
 		nf_flow_offload_stats(flow_table, flow);
 
 	if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
-	    test_bit(NF_FLOW_DYING_BIT, &flow->flags) ||
 	    test_bit(NF_FLOW_TEARDOWN_BIT, &flow->flags)) {
 		if (test_bit(NF_FLOW_HW_BIT, &flow->flags)) {
 			if (!test_bit(NF_FLOW_HW_DYING_BIT, &flow->flags))
@@ -526,7 +522,7 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
 	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
 	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
-		flow_offload_dead(flow);
+		flow_offload_teardown(flow);
 }
 
 static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,