@@ -925,4 +925,15 @@ enum {
#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
+/* Ingress/clsact */
+
+enum {
+ TCA_CLSACT_UNSPEC,
+ TCA_CLSACT_INGRESS_BLOCK,
+ TCA_CLSACT_EGRESS_BLOCK,
+ __TCA_CLSACT_MAX
+};
+
+#define TCA_CLSACT_MAX (__TCA_CLSACT_MAX - 1)
+
#endif
@@ -60,6 +60,29 @@ static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
struct mini_Qdisc_pair *miniqp = priv;
mini_qdisc_pair_swap(miniqp, tp_head);
+}
+
+static const struct nla_policy ingress_policy[TCA_CLSACT_MAX + 1] = {
+ [TCA_CLSACT_INGRESS_BLOCK] = { .type = NLA_U32 },
+};
+
+static int ingress_parse_opt(struct nlattr *opt, u32 *p_ingress_block_index)
+{
+ struct nlattr *tb[TCA_CLSACT_MAX + 1];
+ int err;
+
+ *p_ingress_block_index = 0;
+
+ if (!opt)
+ return 0;
+ err = nla_parse_nested(tb, TCA_CLSACT_MAX, opt, ingress_policy, NULL);
+ if (err)
+ return err;
+
+ if (tb[TCA_CLSACT_INGRESS_BLOCK])
+ *p_ingress_block_index =
+ nla_get_u32(tb[TCA_CLSACT_INGRESS_BLOCK]);
+ return 0;
}
static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
@@ -70,6 +93,11 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
+ err = ingress_parse_opt(opt, &q->block_info.block_index);
+ if (err)
+ return err;
+
+ q->block_info.shareable = true;
q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
q->block_info.chain_head_change = clsact_chain_head_change;
q->block_info.chain_head_change_priv = &q->miniqp;
@@ -94,11 +122,14 @@ static void ingress_destroy(struct Qdisc *sch)
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
+ struct ingress_sched_data *q = qdisc_priv(sch);
struct nlattr *nest;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_CLSACT_INGRESS_BLOCK, q->block->index))
+ goto nla_put_failure;
return nla_nest_end(skb, nest);
@@ -166,6 +197,35 @@ static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
}
}
+static const struct nla_policy clsact_policy[TCA_CLSACT_MAX + 1] = {
+ [TCA_CLSACT_INGRESS_BLOCK] = { .type = NLA_U32 },
+ [TCA_CLSACT_EGRESS_BLOCK] = { .type = NLA_U32 },
+};
+
+static int clsact_parse_opt(struct nlattr *opt, u32 *p_ingress_block_index,
+ u32 *p_egress_block_index)
+{
+ struct nlattr *tb[TCA_CLSACT_MAX + 1];
+ int err;
+
+ *p_ingress_block_index = 0;
+ *p_egress_block_index = 0;
+
+ if (!opt)
+ return 0;
+ err = nla_parse_nested(tb, TCA_CLSACT_MAX, opt, clsact_policy, NULL);
+ if (err)
+ return err;
+
+ if (tb[TCA_CLSACT_INGRESS_BLOCK])
+ *p_ingress_block_index =
+ nla_get_u32(tb[TCA_CLSACT_INGRESS_BLOCK]);
+ if (tb[TCA_CLSACT_EGRESS_BLOCK])
+ *p_egress_block_index =
+ nla_get_u32(tb[TCA_CLSACT_EGRESS_BLOCK]);
+ return 0;
+}
+
static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
{
struct clsact_sched_data *q = qdisc_priv(sch);
@@ -174,6 +234,12 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);
+ err = clsact_parse_opt(opt, &q->ingress_block_info.block_index,
+ &q->egress_block_info.block_index);
+ if (err)
+ return err;
+
+ q->ingress_block_info.shareable = true;
q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
q->ingress_block_info.chain_head_change = clsact_chain_head_change;
q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
@@ -184,6 +250,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);
+ q->egress_block_info.shareable = true;
q->egress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
q->egress_block_info.chain_head_change = clsact_chain_head_change;
q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
@@ -215,6 +282,26 @@ static void clsact_destroy(struct Qdisc *sch)
net_dec_egress_queue();
}
+static int clsact_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct clsact_sched_data *q = qdisc_priv(sch);
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (!nest)
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_CLSACT_INGRESS_BLOCK, q->ingress_block->index))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_CLSACT_EGRESS_BLOCK, q->egress_block->index))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, nest);
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -1;
+}
+
static const struct Qdisc_class_ops clsact_class_ops = {
.leaf = ingress_leaf,
.find = clsact_find,
@@ -230,7 +317,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct clsact_sched_data),
.init = clsact_init,
.destroy = clsact_destroy,
- .dump = ingress_dump,
+ .dump = clsact_dump,
.owner = THIS_MODULE,
};