
[net-next,08/13] net/sched: extend matchall offload for hardware statistics

Message ID 20190504114628.14755-9-jakub.kicinski@netronome.com
State Accepted
Delegated to: David Miller
Series net: act_police offload support

Commit Message

Jakub Kicinski May 4, 2019, 11:46 a.m. UTC
From: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>

Introduce a new command for matchall classifiers that allows hardware
to update statistics.

Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
---
 include/net/pkt_cls.h    |  2 ++
 net/sched/cls_matchall.c | 20 ++++++++++++++++++++
 2 files changed, 22 insertions(+)
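
For illustration, once a driver handles the new command (a minimal driver-side sketch follows the patch below), counters maintained in hardware surface through the regular tc dump path. The device name and policing parameters here are placeholders, and offloading the police action itself depends on the driver support added later in this series:

  # tc qdisc add dev eth0 clsact
  # tc filter add dev eth0 ingress matchall skip_sw \
        action police rate 100mbit burst 16k
  # tc -s filter show dev eth0 ingress

With skip_sw set, the packet and byte counts in the dump come entirely from the statistics reported via TC_CLSMATCHALL_STATS.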

Comments

Jiri Pirko May 4, 2019, 1:10 p.m. UTC | #1
Sat, May 04, 2019 at 01:46:23PM CEST, jakub.kicinski@netronome.com wrote:
>From: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
>
>Introduce a new command for matchall classifiers that allows hardware
>to update statistics.
>
>Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
>Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>

Acked-by: Jiri Pirko <jiri@mellanox.com>

Patch

diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 2d0470661277..161fcf8516ac 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -760,12 +760,14 @@ tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
 enum tc_matchall_command {
 	TC_CLSMATCHALL_REPLACE,
 	TC_CLSMATCHALL_DESTROY,
+	TC_CLSMATCHALL_STATS,
 };
 
 struct tc_cls_matchall_offload {
 	struct tc_cls_common_offload common;
 	enum tc_matchall_command command;
 	struct flow_rule *rule;
+	struct flow_stats stats;
 	unsigned long cookie;
 };
 
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 87bff17ac782..da916f39b719 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -321,6 +321,23 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 	return 0;
 }
 
+static void mall_stats_hw_filter(struct tcf_proto *tp,
+				 struct cls_mall_head *head,
+				 unsigned long cookie)
+{
+	struct tc_cls_matchall_offload cls_mall = {};
+	struct tcf_block *block = tp->chain->block;
+
+	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
+	cls_mall.command = TC_CLSMATCHALL_STATS;
+	cls_mall.cookie = cookie;
+
+	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+
+	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
+			      cls_mall.stats.pkts, cls_mall.stats.lastused);
+}
+
 static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
 		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
 {
@@ -332,6 +349,9 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
 	if (!head)
 		return skb->len;
 
+	if (!tc_skip_hw(head->flags))
+		mall_stats_hw_filter(tp, head, (unsigned long)head);
+
 	t->tcm_handle = head->handle;
 
 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
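
To make the expected driver-side contract concrete, below is a minimal sketch of a block callback handler. It assumes a hypothetical driver: the foo_* types and helpers do not exist anywhere and only stand in for a driver's private rule bookkeeping. flow_stats_update() is the existing helper from include/net/flow_offload.h; the real consumer of TC_CLSMATCHALL_STATS is the driver support added later in this series.

#include <net/pkt_cls.h>
#include <net/flow_offload.h>

struct foo_priv;	/* hypothetical driver private state */

/* Hypothetical per-rule counters, refreshed from hardware elsewhere. */
struct foo_rule {
	u64 bytes;	/* bytes seen since the previous STATS query */
	u64 pkts;	/* packets seen since the previous STATS query */
	u64 lastused;	/* jiffies at the most recent hardware hit */
};

static int foo_rule_install(struct foo_priv *priv,
			    struct tc_cls_matchall_offload *cls_mall);
static void foo_rule_remove(struct foo_priv *priv, unsigned long cookie);
static struct foo_rule *foo_rule_lookup(struct foo_priv *priv,
					unsigned long cookie);

static int foo_setup_tc_cls_matchall(struct foo_priv *priv,
				     struct tc_cls_matchall_offload *cls_mall)
{
	struct foo_rule *rule;

	switch (cls_mall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return foo_rule_install(priv, cls_mall);
	case TC_CLSMATCHALL_DESTROY:
		foo_rule_remove(priv, cls_mall->cookie);
		return 0;
	case TC_CLSMATCHALL_STATS:
		rule = foo_rule_lookup(priv, cls_mall->cookie);
		if (!rule)
			return -ENOENT;
		/* Report deltas gathered from hardware back to the core;
		 * cls_matchall folds them into the filter's actions via
		 * tcf_exts_stats_update() when the filter is dumped.
		 */
		flow_stats_update(&cls_mall->stats, rule->bytes,
				  rule->pkts, rule->lastused);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

Because mall_stats_hw_filter() ignores the callback's return value, a driver with nothing to report can simply leave cls_mall->stats zeroed; tcf_exts_stats_update() then adds nothing to the software counters.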