
[net-next,v2,1/2] net/sched: act_csum: use per-core statistics

Message ID 88d6e0d86f107270a907e256d866757cc79bf254.1516366191.git.dcaratti@redhat.com
State Superseded, archived
Delegated to: David Miller
Series net/sched: remove spinlock from 'csum' action

Commit Message

Davide Caratti Jan. 19, 2018, 2:12 p.m. UTC
Use per-CPU counters, like other TC actions do, instead of maintaining a
single set of stats shared across all cores. This allows act_csum stats to
be updated without needing to protect them with spin_{,un}lock_bh()
invocations.

Signed-off-by: Davide Caratti <dcaratti@redhat.com>
---
 net/sched/act_csum.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
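
For context, the pattern the patch switches to works like this: each CPU
increments its own private counter slot from the fast path, with no shared
cacheline and no lock, and whoever dumps the statistics folds all the slots
into a single total. The sketch below is a minimal userspace analogue of
that idea, not kernel code: MAX_CPUS, struct pcpu_stats and the function
names are made up for illustration, and sched_getcpu() merely stands in for
the kernel's this_cpu_ptr().

#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CPUS 64		/* illustrative upper bound, not a kernel value */

struct pcpu_stats {
	uint64_t packets;
	uint64_t drops;
} __attribute__((aligned(64)));	/* keep each CPU's slot on its own cacheline */

static struct pcpu_stats stats[MAX_CPUS];

/* Fast path: bump only the current CPU's slot, no spinlock involved.
 * Relaxed atomics stand in for the kernel's "each CPU owns its slot"
 * guarantee, since a userspace thread may migrate between CPUs. */
static void stats_update(int dropped)
{
	int cpu = sched_getcpu();
	unsigned int slot = (cpu < 0 ? 0 : (unsigned int)cpu) % MAX_CPUS;

	__atomic_fetch_add(&stats[slot].packets, 1, __ATOMIC_RELAXED);
	if (dropped)
		__atomic_fetch_add(&stats[slot].drops, 1, __ATOMIC_RELAXED);
}

/* Slow path (dump): fold every slot into one total, the way the
 * per-CPU bstats/qstats are summed when the action is dumped. */
static void stats_read(uint64_t *packets, uint64_t *drops)
{
	*packets = 0;
	*drops = 0;
	for (unsigned int i = 0; i < MAX_CPUS; i++) {
		*packets += __atomic_load_n(&stats[i].packets, __ATOMIC_RELAXED);
		*drops   += __atomic_load_n(&stats[i].drops, __ATOMIC_RELAXED);
	}
}

int main(void)
{
	uint64_t packets, drops;

	stats_update(0);	/* one packet processed */
	stats_update(1);	/* one packet processed and dropped */
	stats_read(&packets, &drops);
	printf("packets=%llu drops=%llu\n",
	       (unsigned long long)packets, (unsigned long long)drops);
	return 0;
}

In the kernel fast path the per-CPU update needs no lock at all, since each
CPU only ever touches its own bstats/qstats slot; note that the patch keeps
the spin_lock() in tcf_csum(), but only around the read of tcf_action and
update_flags, not around the counter updates.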

Patch

diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index af4b8ec60d9a..df22da365cd9 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -67,7 +67,7 @@  static int tcf_csum_init(struct net *net, struct nlattr *nla,
 
 	if (!tcf_idr_check(tn, parm->index, a, bind)) {
 		ret = tcf_idr_create(tn, parm->index, est, a,
-				     &act_csum_ops, bind, false);
+				     &act_csum_ops, bind, true);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -542,9 +542,9 @@  static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
 	int action;
 	u32 update_flags;
 
-	spin_lock(&p->tcf_lock);
 	tcf_lastuse_update(&p->tcf_tm);
-	bstats_update(&p->tcf_bstats, skb);
+	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
+	spin_lock(&p->tcf_lock);
 	action = p->tcf_action;
 	update_flags = p->update_flags;
 	spin_unlock(&p->tcf_lock);
@@ -566,9 +566,7 @@  static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
 	return action;
 
 drop:
-	spin_lock(&p->tcf_lock);
-	p->tcf_qstats.drops++;
-	spin_unlock(&p->tcf_lock);
+	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
 	return TC_ACT_SHOT;
 }
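
As for the first hunk, the last tcf_idr_create() argument flipped from
false to true appears to be the flag that asks the act core to allocate the
per-CPU bstats/qstats in the first place; without it, the
this_cpu_ptr(p->common.cpu_bstats) and this_cpu_ptr(p->common.cpu_qstats)
dereferences added above would hit NULL pointers.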