@@ -48,7 +48,8 @@ struct tcindex_data {
u32 hash; /* hash table size; 0 if undefined */
u32 alloc_hash; /* allocated size */
u32 fall_through; /* 0: only classify if explicit match */
- struct rcu_head rcu;
+ struct net *net;
+ struct rcu_work rwork;
};
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
@@ -229,15 +230,23 @@ static int tcindex_destroy_element(struct tcf_proto *tp,
return tcindex_delete(tp, arg, &last, NULL);
}
-static void __tcindex_destroy(struct rcu_head *head)
+static void __tcindex_destroy(struct tcindex_data *p)
{
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
-
kfree(p->perfect);
kfree(p->h);
kfree(p);
}
+static void tcindex_destroy_work(struct work_struct *work)
+{
+ struct tcindex_data *p = container_of(to_rcu_work(work),
+ struct tcindex_data,
+ rwork);
+
+ put_net(p->net);
+ __tcindex_destroy(p);
+}
+
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
@@ -258,14 +267,22 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}
-static void __tcindex_partial_destroy(struct rcu_head *head)
+static void __tcindex_partial_destroy(struct tcindex_data *p)
{
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
-
kfree(p->perfect);
kfree(p);
}
+static void tcindex_partial_destroy_work(struct work_struct *work)
+{
+ struct tcindex_data *p = container_of(to_rcu_work(work),
+ struct tcindex_data,
+ rwork);
+
+ put_net(p->net);
+ __tcindex_partial_destroy(p);
+}
+
static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
int i;
@@ -333,6 +350,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
cp->alloc_hash = p->alloc_hash;
cp->fall_through = p->fall_through;
cp->tp = tp;
+ cp->net = net;
if (p->perfect) {
int i;
@@ -477,8 +495,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
rcu_assign_pointer(*fp, f);
}
- if (oldp)
- call_rcu(&oldp->rcu, __tcindex_partial_destroy);
+ if (oldp) {
+ if (oldp->net && maybe_get_net(oldp->net))
+ tcf_queue_work(&oldp->rwork,
+ tcindex_partial_destroy_work);
+ else
+ __tcindex_partial_destroy(oldp);
+ }
return 0;
errout_alloc:
@@ -570,7 +593,10 @@ static void tcindex_destroy(struct tcf_proto *tp,
walker.fn = tcindex_destroy_element;
tcindex_walk(tp, &walker);
- call_rcu(&p->rcu, __tcindex_destroy);
+ if (maybe_get_net(p->net))
+ tcf_queue_work(&p->rwork, tcindex_destroy_work);
+ else
+ __tcindex_destroy(p);
}
tcindex_destroy() invokes tcindex_destroy_element() via a walker to delete
each filter result in its perfect hash table, and tcindex_destroy_element()
calls tcindex_delete(), which schedules tcf RCU works to do the final
deletion work. Unfortunately this races with the RCU callback
__tcindex_destroy(), which could lead to a use-after-free as reported by
Adrian.

Fix this by migrating this RCU callback to tcf RCU work too; since that
workqueue is ordered, the use-after-free can no longer happen. This change
requires storing a net pointer inside struct tcindex_data, to avoid the
known race with tc_action_net_exit().

Fixes: 27ce4f05e2ab ("net_sched: use tcf_queue_work() in tcindex filter")
Reported-by: Adrian <bugs@abtelecom.ro>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Cc: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
---
 net/sched/cls_tcindex.c | 46 ++++++++++++++++++++++++++++++++---------
 1 file changed, 36 insertions(+), 10 deletions(-)
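
For reviewers, below is a consolidated sketch of the deferral pattern the
hunks above introduce. It is illustration only, not extra code to apply;
the helper name tcindex_defer_destroy() is hypothetical, everything else
(p->net, p->rwork, tcf_queue_work(), tcindex_destroy_work(),
__tcindex_destroy()) comes from the diff:

	/*
	 * Illustration only, not part of the patch.  "p" is the
	 * tcindex_data being torn down; tcindex_defer_destroy() is a
	 * made-up name for the logic now open-coded in both
	 * tcindex_set_parms() and tcindex_destroy().
	 */
	static void tcindex_defer_destroy(struct tcindex_data *p)
	{
		if (maybe_get_net(p->net)) {
			/*
			 * Pin the netns and queue on the ordered tcf
			 * workqueue: this runs strictly after the
			 * per-filter works queued by tcindex_delete(),
			 * so the reported use-after-free cannot occur.
			 * tcindex_destroy_work() calls put_net() before
			 * freeing via __tcindex_destroy().
			 */
			tcf_queue_work(&p->rwork, tcindex_destroy_work);
		} else {
			/*
			 * The netns is already being dismantled (the race
			 * with tc_action_net_exit()); nothing else can run
			 * concurrently, so free synchronously.
			 */
			__tcindex_destroy(p);
		}
	}

The maybe_get_net() check is what makes storing the net pointer in
struct tcindex_data necessary: without a netns reference, the deferred
work could outlive the namespace it belongs to.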