
[net-next,1/1] net: sched: ensure tc flower reoffload takes filter ref

Message ID: 1554245600-11513-1-git-send-email-john.hurley@netronome.com
State: Accepted
Delegated to: David Miller
Series: [net-next,1/1] net: sched: ensure tc flower reoffload takes filter ref

Commit Message

John Hurley April 2, 2019, 10:53 p.m. UTC
Recent changes to TC flower remove the requirement for rtnl lock when
accessing and modifying filters. Refcounts now ensure access and deletion
do not happen concurrently. However, the reoffload function which cycles
through all filters and replays them to registered hw drivers is not
protected.

Use the fl_get_next_filter() function to cycle the filters for reoffload
and ensure the ref taken by this function is put when done with each
filter.

Signed-off-by: John Hurley <john.hurley@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
---
 net/sched/cls_flower.c | 88 ++++++++++++++++++++++++++------------------------
 1 file changed, 46 insertions(+), 42 deletions(-)
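
The pattern the commit message describes can be sketched outside the kernel. The
user-space model below is only an illustration, assuming a toy array in place of
the cls_flower idr: a lookup helper returns the next filter with a reference
already taken, and the caller drops that reference at the end of each iteration
and on every early-return path. None of the names here (struct filter,
get_next_filter(), put_filter(), reoffload()) are the kernel's.

#include <stdio.h>
#include <stdlib.h>

struct filter {
	unsigned long handle;
	int refcnt;
	int skip_hw;
};

static struct filter *table[8];	/* toy stand-in for the cls_flower idr */

/* Return the next filter at or after *handle with a reference held. */
static struct filter *get_next_filter(unsigned long *handle)
{
	for (unsigned long i = *handle; i < 8; i++) {
		if (table[i]) {
			table[i]->refcnt++;	/* hold a ref across the iteration */
			*handle = i;
			return table[i];
		}
	}
	return NULL;
}

/* Drop the reference; free once the last ref is gone. */
static void put_filter(struct filter *f)
{
	if (--f->refcnt == 0) {
		table[f->handle] = NULL;
		free(f);
	}
}

static int offload_one(struct filter *f, int add)
{
	printf("%s filter %lu\n", add ? "replace" : "destroy", f->handle);
	return 0;
}

static int reoffload(int add)
{
	unsigned long handle = 0;
	struct filter *f;
	int err;

	while ((f = get_next_filter(&handle))) {
		if (f->skip_hw)
			goto next;

		err = offload_one(f, add);
		if (err) {
			put_filter(f);	/* early returns must drop the ref too */
			return err;
		}
next:
		handle++;	/* resume the walk past this handle */
		put_filter(f);
	}
	return 0;
}

int main(void)
{
	for (unsigned long i = 0; i < 3; i++) {
		table[i] = calloc(1, sizeof(**table));
		table[i]->handle = i;
		table[i]->refcnt = 1;	/* ref held by the table itself */
		table[i]->skip_hw = (i == 1);
	}
	reoffload(1);
	return 0;
}

The real fl_reoffload() in the patch below has the same shape: fl_get_next_filter()
takes the reference, and __fl_put() is called both on the next_flow path and before
each early return.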

Comments

Vlad Buslov April 3, 2019, 3:53 p.m. UTC | #1
On Wed 03 Apr 2019 at 01:53, John Hurley <john.hurley@netronome.com> wrote:
> Recent changes to TC flower remove the requirement for rtnl lock when
> accessing and modifying filters. Refcounts now ensure access and deletion
> do not happen concurrently. However, the reoffload function which cycles
> through all filters and replays them to registered hw drivers is not
> protected.
>
> Use the fl_get_next_filter() function to cycle the filters for reoffload
> and ensure the ref taken by this function is put when done with each
> filter.
>
> Signed-off-by: John Hurley <john.hurley@netronome.com>
> Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>

Hi John,

I have a very similar implementation in my next patch set that
implements the unlocked hw offloads API, though I implemented helpers
fl_get_next_mask() and fl_get_next_hw_filter_on_mask() to traverse
filters via their linked lists instead of performing an idr lookup on
each iteration. However, I'm not sure this optimization is necessary,
because offloading to hardware is supposedly much more costly than an
idr lookup anyway.

Thanks for doing this and FWIW:

Reviewed-by: Vlad Buslov <vladbu@mellanox.com>
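
The helpers Vlad mentions are not in this thread, so the sketch below is only a
guess at the shape of that traversal: walk each mask's own filter list directly
instead of doing an idr lookup per iteration, still taking and dropping a
reference per filter. All structures and names here (struct mask, struct flt,
next_hw_filter_on_mask(), reoffload_by_mask()) are invented for illustration and
are not taken from his patch set.

#include <stdio.h>

struct flt {
	struct flt *next;
	int skip_hw;
	int refcnt;
};

struct mask {
	struct mask *next;
	struct flt *filters;	/* filters sharing this mask */
};

/* Advance along one mask's filter list, skipping !hw filters and taking
 * a reference on the result; the caller drops it when done. */
static struct flt *next_hw_filter_on_mask(struct mask *m, struct flt *cur)
{
	struct flt *f = cur ? cur->next : m->filters;

	while (f && f->skip_hw)
		f = f->next;
	if (f)
		f->refcnt++;
	return f;
}

static void reoffload_by_mask(struct mask *masks)
{
	for (struct mask *m = masks; m; m = m->next) {
		struct flt *f = NULL;

		while ((f = next_hw_filter_on_mask(m, f))) {
			printf("replay filter with refcnt %d\n", f->refcnt);
			/* Single-threaded model: dropping the ref before
			 * fetching the next entry is harmless here; real
			 * unlocked code would keep it until the next filter
			 * has been looked up. */
			f->refcnt--;
		}
	}
}

int main(void)
{
	struct flt f2 = { .next = NULL, .skip_hw = 0, .refcnt = 1 };
	struct flt f1 = { .next = &f2, .skip_hw = 1, .refcnt = 1 };
	struct mask m = { .next = NULL, .filters = &f1 };

	reoffload_by_mask(&m);
	return 0;
}

Whether the list walk is worth it is the question Vlad raises: it saves the
per-iteration idr lookup, but the hardware offload call is expected to dominate
the cost either way.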
David Miller April 5, 2019, 12:20 a.m. UTC | #2
From: John Hurley <john.hurley@netronome.com>
Date: Tue,  2 Apr 2019 23:53:20 +0100

> Recent changes to TC flower remove the requirement for rtnl lock when
> accessing and modifying filters. Refcounts now ensure access and deletion
> do not happen concurrently. However, the reoffload function which cycles
> through all filters and replays them to registered hw drivers is not
> protected.
> 
> Use the fl_get_next_filter() function to cycle the filters for reoffload
> and ensure the ref taken by this function is put when done with each
> filter.
> 
> Signed-off-by: John Hurley <john.hurley@netronome.com>
> Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>

Applied, thank you.

Patch

diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 0638f17..6050e3c 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1683,59 +1683,63 @@  static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 			void *cb_priv, struct netlink_ext_ack *extack)
 {
-	struct cls_fl_head *head = fl_head_dereference(tp);
 	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
-	struct fl_flow_mask *mask;
+	unsigned long handle = 0;
 	struct cls_fl_filter *f;
 	int err;
 
-	list_for_each_entry(mask, &head->masks, list) {
-		list_for_each_entry(f, &mask->filters, list) {
-			if (tc_skip_hw(f->flags))
-				continue;
-
-			cls_flower.rule =
-				flow_rule_alloc(tcf_exts_num_actions(&f->exts));
-			if (!cls_flower.rule)
-				return -ENOMEM;
-
-			tc_cls_common_offload_init(&cls_flower.common, tp,
-						   f->flags, extack);
-			cls_flower.command = add ?
-				TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
-			cls_flower.cookie = (unsigned long)f;
-			cls_flower.rule->match.dissector = &mask->dissector;
-			cls_flower.rule->match.mask = &mask->key;
-			cls_flower.rule->match.key = &f->mkey;
-
-			err = tc_setup_flow_action(&cls_flower.rule->action,
-						   &f->exts);
-			if (err) {
-				kfree(cls_flower.rule);
-				if (tc_skip_sw(f->flags)) {
-					NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
-					return err;
-				}
-				continue;
-			}
+	while ((f = fl_get_next_filter(tp, &handle))) {
+		if (tc_skip_hw(f->flags))
+			goto next_flow;
 
-			cls_flower.classid = f->res.classid;
+		cls_flower.rule =
+			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+		if (!cls_flower.rule) {
+			__fl_put(f);
+			return -ENOMEM;
+		}
 
-			err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
+					   extack);
+		cls_flower.command = add ?
+			TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
+		cls_flower.cookie = (unsigned long)f;
+		cls_flower.rule->match.dissector = &f->mask->dissector;
+		cls_flower.rule->match.mask = &f->mask->key;
+		cls_flower.rule->match.key = &f->mkey;
+
+		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+		if (err) {
 			kfree(cls_flower.rule);
-
-			if (err) {
-				if (add && tc_skip_sw(f->flags))
-					return err;
-				continue;
+			if (tc_skip_sw(f->flags)) {
+				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+				__fl_put(f);
+				return err;
 			}
+			goto next_flow;
+		}
 
-			spin_lock(&tp->lock);
-			tc_cls_offload_cnt_update(block, &f->in_hw_count,
-						  &f->flags, add);
-			spin_unlock(&tp->lock);
+		cls_flower.classid = f->res.classid;
+
+		err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+		kfree(cls_flower.rule);
+
+		if (err) {
+			if (add && tc_skip_sw(f->flags)) {
+				__fl_put(f);
+				return err;
+			}
+			goto next_flow;
 		}
+
+		spin_lock(&tp->lock);
+		tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
+					  add);
+		spin_unlock(&tp->lock);
+next_flow:
+		handle++;
+		__fl_put(f);
 	}
 
 	return 0;