@@ -3444,10 +3444,8 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
q = rcu_dereference(rxq->qdisc);
if (q != &noop_qdisc) {
- spin_lock(qdisc_lock(q));
if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
result = qdisc_enqueue_root(skb, q);
- spin_unlock(qdisc_lock(q));
}
return result;
@@ -88,6 +88,11 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
/* ------------------------------------------------------------- */
+static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ sch->flags |= TCQ_F_LLQDISC;
+ return 0;
+}
static void ingress_destroy(struct Qdisc *sch)
{
@@ -122,6 +127,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
};
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
+ .init = ingress_init,
.cl_ops = &ingress_class_ops,
.id = "ingress",
.priv_size = sizeof(struct ingress_qdisc_data),
After the previous patches to make the filters RCU safe and support per-CPU counters we can drop the qdisc lock around the ingress qdisc hook. This is possible because the ingress qdisc is a very basic qdisc and only updates stats and runs tc_classify. It's the simplest qdisc we have. In order for the per-CPU counters to get invoked the ingress qdisc must set the LLQDISC flag. Signed-off-by: John Fastabend <john.r.fastabend@intel.com> --- net/core/dev.c | 2 -- net/sched/sch_ingress.c | 6 ++++++ 2 files changed, 6 insertions(+), 2 deletions(-) -- To unsubscribe from this list: send the line "unsubscribe netdev" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html