
[net-next,04/12] net: sched: flower: track filter deletion with flag

Message ID 20190214074712.17846-5-vladbu@mellanox.com
State: Changes Requested
Delegated to: David Miller
Series: Refactor flower classifier to remove dependency on rtnl lock

Commit Message

Vlad Buslov Feb. 14, 2019, 7:47 a.m. UTC
To prevent double deletion of a filter by concurrent tasks when the rtnl
lock is not used for synchronization, add a 'deleted' filter field. Check
the value of this field when modifying filters and return an error if
concurrent deletion is detected.

Refactor __fl_delete() to accept a pointer to a 'last' boolean as an
argument, and to return an error code as the function return value instead.
This is necessary to signal a concurrent filter delete to the caller.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
---
 net/sched/cls_flower.c | 55 ++++++++++++++++++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 17 deletions(-)
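
[Editor's note: the race the new 'deleted' flag closes can be illustrated with a
self-contained userspace analogue. This is plain C with pthreads, not kernel
code; struct filter, filter_delete() and the mutex are purely illustrative, with
the mutex standing in for whatever serializes the flag update in the series.
Two tasks race to delete the same object; exactly one flips 'deleted' and drops
the list's reference, the other observes the flag and gets -ENOENT.]

/* Userspace analogue of the double-delete race (illustration only). */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct filter {
	pthread_mutex_t lock;	/* stands in for the lock serializing the flag */
	int refcnt;		/* reference held by the "filter list" */
	bool deleted;
};

static int filter_delete(struct filter *f)
{
	int err = 0;

	pthread_mutex_lock(&f->lock);
	if (f->deleted) {
		err = -ENOENT;	/* a concurrent task already deleted it */
	} else {
		f->deleted = true;
		f->refcnt--;	/* drop the list's reference exactly once */
	}
	pthread_mutex_unlock(&f->lock);
	return err;
}

static void *deleter(void *arg)
{
	printf("delete returned %d\n", filter_delete(arg));
	return NULL;
}

int main(void)
{
	static struct filter f = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.refcnt = 1,
	};
	pthread_t a, b;

	pthread_create(&a, NULL, deleter, &f);
	pthread_create(&b, NULL, deleter, &f);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("refcnt = %d\n", f.refcnt);	/* stays 0, never goes negative */
	return 0;
}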

Comments

Stefano Brivio Feb. 14, 2019, 8:49 p.m. UTC | #1
On Thu, 14 Feb 2019 09:47:04 +0200
Vlad Buslov <vladbu@mellanox.com> wrote:

> +static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
> +		       bool *last, struct netlink_ext_ack *extack)

This would be easier to follow (at least for me):

>  {
>  	struct cls_fl_head *head = fl_head_dereference(tp);
>  	bool async = tcf_exts_get_net(&f->exts);
> -	bool last;
> -
> -	idr_remove(&head->handle_idr, f->handle);
> -	list_del_rcu(&f->list);
> -	last = fl_mask_put(head, f->mask, async);
> -	if (!tc_skip_hw(f->flags))
> -		fl_hw_destroy_filter(tp, f, extack);
> -	tcf_unbind_filter(tp, &f->res);
> -	__fl_put(f);
> +	int err = 0;

without this

> +
> +	(*last) = false;

with *last = false;

> +
> +	if (!f->deleted) {

with:
	if (f->deleted)
		return -ENOENT;

> +		f->deleted = true;
> +		rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
> +				       f->mask->filter_ht_params);
> +		idr_remove(&head->handle_idr, f->handle);
> +		list_del_rcu(&f->list);
> +		(*last) = fl_mask_put(head, f->mask, async);

with:
	*last = fl_mask_put(head, f->mask, async);

> +		if (!tc_skip_hw(f->flags))
> +			fl_hw_destroy_filter(tp, f, extack);
> +		tcf_unbind_filter(tp, &f->res);
> +		__fl_put(f);

and a return 0; here

> +	} else {
> +		err = -ENOENT;
> +	}
>  
> -	return last;
> +	return err;
>  }
>  
> [...]
>
> @@ -1520,14 +1541,14 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
>  {
>  	struct cls_fl_head *head = fl_head_dereference(tp);
>  	struct cls_fl_filter *f = arg;
> +	bool last_on_mask;

This is unused in this series, maybe change __fl_delete() to optionally
take NULL as 'bool *last' argument?

> +	int err = 0;

Nit: no need to initialise this.
 
> -	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
> -			       f->mask->filter_ht_params);
> -	__fl_delete(tp, f, extack);
> +	err = __fl_delete(tp, f, &last_on_mask, extack);
>  	*last = list_empty(&head->masks);
>  	__fl_put(f);
>  
> -	return 0;
> +	return err;
>  }
>  
>  static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
Vlad Buslov Feb. 15, 2019, 3:54 p.m. UTC | #2
On Thu 14 Feb 2019 at 20:49, Stefano Brivio <sbrivio@redhat.com> wrote:
> On Thu, 14 Feb 2019 09:47:04 +0200
> Vlad Buslov <vladbu@mellanox.com> wrote:
>
>> +static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
>> +		       bool *last, struct netlink_ext_ack *extack)
>
> This would be easier to follow (at least for me):
>
>>  {
>>  	struct cls_fl_head *head = fl_head_dereference(tp);
>>  	bool async = tcf_exts_get_net(&f->exts);
>> -	bool last;
>> -
>> -	idr_remove(&head->handle_idr, f->handle);
>> -	list_del_rcu(&f->list);
>> -	last = fl_mask_put(head, f->mask, async);
>> -	if (!tc_skip_hw(f->flags))
>> -		fl_hw_destroy_filter(tp, f, extack);
>> -	tcf_unbind_filter(tp, &f->res);
>> -	__fl_put(f);
>> +	int err = 0;
>
> without this
>
>> +
>> +	(*last) = false;
>
> with *last = false;
>
>> +
>> +	if (!f->deleted) {
>
> with:
> 	if (f->deleted)
> 		return -ENOENT;
>
>> +		f->deleted = true;
>> +		rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
>> +				       f->mask->filter_ht_params);
>> +		idr_remove(&head->handle_idr, f->handle);
>> +		list_del_rcu(&f->list);
>> +		(*last) = fl_mask_put(head, f->mask, async);
>
> with:
> 	*last = fl_mask_put(head, f->mask, async);
>
>> +		if (!tc_skip_hw(f->flags))
>> +			fl_hw_destroy_filter(tp, f, extack);
>> +		tcf_unbind_filter(tp, &f->res);
>> +		__fl_put(f);
>
> and a return 0; here

Agree, this function looks better when structured in the way you
suggest. Will change it in V2.

>
>> +	} else {
>> +		err = -ENOENT;
>> +	}
>>  
>> -	return last;
>> +	return err;
>>  }
>>  
>> [...]
>>
>> @@ -1520,14 +1541,14 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
>>  {
>>  	struct cls_fl_head *head = fl_head_dereference(tp);
>>  	struct cls_fl_filter *f = arg;
>> +	bool last_on_mask;
>
> This is unused in this series, maybe change __fl_delete() to optionally
> take NULL as 'bool *last' argument?

It was implemented like that originally, but on internal review with
Jiri we decided that having an unused variable here is better than
complicating __fl_delete() with support for "last" being NULL without
a good reason.

>
>> +	int err = 0;
>
> Nit: no need to initialise this.

Yes, but I always regret having uninitialized variables in my functions
later on :(

>  
>> -	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
>> -			       f->mask->filter_ht_params);
>> -	__fl_delete(tp, f, extack);
>> +	err = __fl_delete(tp, f, &last_on_mask, extack);
>>  	*last = list_empty(&head->masks);
>>  	__fl_put(f);
>>  
>> -	return 0;
>> +	return err;
>>  }
>>  
>>  static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
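
[Editor's note: folding Stefano's suggestions into the hunk above, the
restructured helper would read roughly as follows. This is only a sketch of
the shape agreed on for v2, not the actual v2 patch; as in the original,
tcf_exts_get_net() is still taken before the early return.]

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	bool async = tcf_exts_get_net(&f->exts);

	*last = false;

	/* Bail out early if a concurrent task already deleted the filter. */
	if (f->deleted)
		return -ENOENT;

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	*last = fl_mask_put(head, f->mask, async);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}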

Patch

diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index b216ed26f344..fa5465f890e1 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -110,6 +110,7 @@  struct cls_fl_filter {
 	 * synchronization. Use atomic reference counter to be concurrency-safe.
 	 */
 	refcount_t refcnt;
+	bool deleted;
 };
 
 static const struct rhashtable_params mask_ht_params = {
@@ -454,6 +455,8 @@  static void __fl_put(struct cls_fl_filter *f)
 	if (!refcount_dec_and_test(&f->refcnt))
 		return;
 
+	WARN_ON(!f->deleted);
+
 	if (tcf_exts_get_net(&f->exts))
 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
 	else
@@ -490,22 +493,31 @@  static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
 	return f;
 }
 
-static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
-			struct netlink_ext_ack *extack)
+static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
+		       bool *last, struct netlink_ext_ack *extack)
 {
 	struct cls_fl_head *head = fl_head_dereference(tp);
 	bool async = tcf_exts_get_net(&f->exts);
-	bool last;
-
-	idr_remove(&head->handle_idr, f->handle);
-	list_del_rcu(&f->list);
-	last = fl_mask_put(head, f->mask, async);
-	if (!tc_skip_hw(f->flags))
-		fl_hw_destroy_filter(tp, f, extack);
-	tcf_unbind_filter(tp, &f->res);
-	__fl_put(f);
+	int err = 0;
+
+	(*last) = false;
+
+	if (!f->deleted) {
+		f->deleted = true;
+		rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
+				       f->mask->filter_ht_params);
+		idr_remove(&head->handle_idr, f->handle);
+		list_del_rcu(&f->list);
+		(*last) = fl_mask_put(head, f->mask, async);
+		if (!tc_skip_hw(f->flags))
+			fl_hw_destroy_filter(tp, f, extack);
+		tcf_unbind_filter(tp, &f->res);
+		__fl_put(f);
+	} else {
+		err = -ENOENT;
+	}
 
-	return last;
+	return err;
 }
 
 static void fl_destroy_sleepable(struct work_struct *work)
@@ -525,10 +537,12 @@  static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
 	struct cls_fl_head *head = fl_head_dereference(tp);
 	struct fl_flow_mask *mask, *next_mask;
 	struct cls_fl_filter *f, *next;
+	bool last;
 
 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
 		list_for_each_entry_safe(f, next, &mask->filters, list) {
-			if (__fl_delete(tp, f, extack))
+			__fl_delete(tp, f, &last, extack);
+			if (last)
 				break;
 		}
 	}
@@ -1439,6 +1453,12 @@  static int fl_change(struct net *net, struct sk_buff *in_skb,
 
 	refcount_inc(&fnew->refcnt);
 	if (fold) {
+		/* Fold filter was deleted concurrently. Retry lookup. */
+		if (fold->deleted) {
+			err = -EAGAIN;
+			goto errout_hw;
+		}
+
 		fnew->handle = handle;
 
 		err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
@@ -1451,6 +1471,7 @@  static int fl_change(struct net *net, struct sk_buff *in_skb,
 				       fold->mask->filter_ht_params);
 		idr_replace(&head->handle_idr, fnew, fnew->handle);
 		list_replace_rcu(&fold->list, &fnew->list);
+		fold->deleted = true;
 
 		if (!tc_skip_hw(fold->flags))
 			fl_hw_destroy_filter(tp, fold, NULL);
@@ -1520,14 +1541,14 @@  static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
 {
 	struct cls_fl_head *head = fl_head_dereference(tp);
 	struct cls_fl_filter *f = arg;
+	bool last_on_mask;
+	int err = 0;
 
-	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
-			       f->mask->filter_ht_params);
-	__fl_delete(tp, f, extack);
+	err = __fl_delete(tp, f, &last_on_mask, extack);
 	*last = list_empty(&head->masks);
 	__fl_put(f);
 
-	return 0;
+	return err;
 }
 
 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,