[v2] net: openvswitch: pass NULL for unused parameters

Message ID: 20200830212630.32241-1-trix@redhat.com
State: Changes Requested
Delegated to: David Miller
Series: [v2] net: openvswitch: pass NULL for unused parameters

Commit Message

Tom Rix Aug. 30, 2020, 9:26 p.m. UTC
From: Tom Rix <trix@redhat.com>

clang static analysis flags these problems:

flow_table.c:713:2: warning: The expression is an uninitialized
  value. The computed value will also be garbage
        (*n_mask_hit)++;
        ^~~~~~~~~~~~~~~
flow_table.c:748:5: warning: The expression is an uninitialized
  value. The computed value will also be garbage
                                (*n_cache_hit)++;
                                ^~~~~~~~~~~~~~~~

These are not real problems because neither parameter is used
by the calling function.

Looking at all of the calling functions shows many cases
where the results are unused.  Passing unused parameters
is wasteful.

In the case where the output mask index parameter of flow_lookup()
is not used by the caller, it always has a value of 0.

To avoid passing unused parameters, rework the
masked_flow_lookup() and flow_lookup() routines to check
for NULL parameters, and pass NULL for the unused arguments
at the call sites.

For the mask index parameter, fall back to a local pointer to
a value of 0 if the caller passed in NULL.
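
As a stand-alone illustration (a simplified user-space sketch, not
the kernel functions themselves), the pattern is:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the reworked pattern: the optional output counter is
 * guarded with a NULL check, and the optional index falls back to a
 * local zero-initialized variable so the body can keep dereferencing
 * 'index' unconditionally. */
static int lookup(uint32_t *n_hit, uint32_t *in_index)
{
	uint32_t idx = 0;
	uint32_t *index = in_index ? in_index : &idx;

	if (n_hit)		/* caller may not care about the counter */
		(*n_hit)++;

	return (int)*index;	/* body always has a valid index */
}

int main(void)
{
	uint32_t hits = 0, pos = 5;

	lookup(&hits, &pos);	/* caller wants both outputs */
	lookup(NULL, NULL);	/* caller wants neither */
	printf("hits=%u\n", hits);
	return 0;
}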

Signed-off-by: Tom Rix <trix@redhat.com>
---
v2
- fix spelling
- add mask index to NULL parameters
---
net/openvswitch/flow_table.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)

Comments

Eelco Chaudron Aug. 31, 2020, 7:50 a.m. UTC | #1
On 30 Aug 2020, at 23:26, trix@redhat.com wrote:

> From: Tom Rix <trix@redhat.com>
>
> clang static analysis flags these problems:
>
> flow_table.c:713:2: warning: The expression is an uninitialized
>   value. The computed value will also be garbage
>         (*n_mask_hit)++;
>         ^~~~~~~~~~~~~~~
> flow_table.c:748:5: warning: The expression is an uninitialized
>   value. The computed value will also be garbage
>                                 (*n_cache_hit)++;
>                                 ^~~~~~~~~~~~~~~~
>
> These are not real problems because neither parameter is used
> by the calling function.
>
> Looking at all of the calling functions shows many cases
> where the results are unused.  Passing unused parameters
> is wasteful.
>
> In the case where the output mask index parameter of flow_lookup()
> is not used by the caller, it always has a value of 0.
>
> To avoid passing unused parameters, rework the
> masked_flow_lookup() and flow_lookup() routines to check
> for NULL parameters, and pass NULL for the unused arguments
> at the call sites.
>
> For the mask index parameter, fall back to a local pointer to
> a value of 0 if the caller passed in NULL.


Some of this code is in the fast path, and some of it is not, so maybe
the original author did this to avoid the NULL checks?

Can you do some performance runs and see if this impacts performance
in a negative way?
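
As a very rough starting point, something like this isolates the raw
cost of the added check. It is only a user-space sketch, not an OVS
test, and the branch here is perfectly predicted, which real mixed
traffic will not be:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Rough micro-benchmark sketch: compares the unconditional counter
 * increment (pre-patch) with the NULL-guarded one (post-patch). The
 * noinline keeps the check inside the callee, as it is in
 * masked_flow_lookup(). */

static __attribute__((noinline)) void bump_always(volatile uint32_t *n)
{
	(*n)++;			/* pre-patch behaviour */
}

static __attribute__((noinline)) void bump_guarded(volatile uint32_t *n)
{
	if (n)			/* post-patch behaviour */
		(*n)++;
}

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	volatile uint32_t counter = 0;
	uint64_t t0;
	long i;

	t0 = now_ns();
	for (i = 0; i < 100000000L; i++)
		bump_always(&counter);
	printf("unconditional: %llu ns\n",
	       (unsigned long long)(now_ns() - t0));

	t0 = now_ns();
	for (i = 0; i < 100000000L; i++)
		bump_guarded(&counter);
	printf("guarded:       %llu ns\n",
	       (unsigned long long)(now_ns() - t0));

	return 0;
}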

> Signed-off-by: Tom Rix <trix@redhat.com>
> ---
> v2
> - fix spelling
> - add mask index to NULL parameters
> ---
> net/openvswitch/flow_table.c | 32 +++++++++++++++-----------------
>  1 file changed, 15 insertions(+), 17 deletions(-)
>
> diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
> index e2235849a57e..eac25596e4f4 100644
> --- a/net/openvswitch/flow_table.c
> +++ b/net/openvswitch/flow_table.c
> @@ -710,7 +710,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
>  	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
>  	hash = flow_hash(&masked_key, &mask->range);
>  	head = find_bucket(ti, hash);
> -	(*n_mask_hit)++;
> +	if (n_mask_hit)
> +		(*n_mask_hit)++;
>
>  	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
>  				lockdep_ovsl_is_held()) {
> @@ -730,12 +731,17 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
>  				   const struct sw_flow_key *key,
>  				   u32 *n_mask_hit,
>  				   u32 *n_cache_hit,
> -				   u32 *index)
> +				   u32 *in_index)
>  {
>  	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
>  	struct sw_flow *flow;
>  	struct sw_flow_mask *mask;
>  	int i;
> +	u32 idx = 0;
> +	u32 *index = &idx;
> +
> +	if (in_index)
> +		index = in_index;
>
>  	if (likely(*index < ma->max)) {
>  		mask = rcu_dereference_ovsl(ma->masks[*index]);
> @@ -745,7 +751,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
>  				u64_stats_update_begin(&ma->syncp);
>  				usage_counters[*index]++;
>  				u64_stats_update_end(&ma->syncp);
> -				(*n_cache_hit)++;
> +				if (n_cache_hit)
> +					(*n_cache_hit)++;
>  				return flow;
>  			}
>  		}
> @@ -796,13 +803,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
>
>  	*n_mask_hit = 0;
>  	*n_cache_hit = 0;
> -	if (unlikely(!skb_hash || mc->cache_size == 0)) {
> -		u32 mask_index = 0;
> -		u32 cache = 0;
> -
> -		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
> -				   &mask_index);
> -	}
> +	if (unlikely(!skb_hash || mc->cache_size == 0))
> +		return flow_lookup(tbl, ti, ma, key, n_mask_hit, NULL,
> +				   NULL);
>
>  	/* Pre and post recirulation flows usually have the same skb_hash
>  	 * value. To avoid hash collisions, rehash the 'skb_hash' with
> @@ -849,11 +852,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
>  {
>  	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
>  	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
> -	u32 __always_unused n_mask_hit;
> -	u32 __always_unused n_cache_hit;
> -	u32 index = 0;
> -
> -	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
> +	return flow_lookup(tbl, ti, ma, key, NULL, NULL, NULL);
>  }
>
>  struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
> @@ -865,7 +864,6 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
>  	/* Always called under ovs-mutex. */
>  	for (i = 0; i < ma->max; i++) {
>  		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
> -		u32 __always_unused n_mask_hit;
>  		struct sw_flow_mask *mask;
>  		struct sw_flow *flow;
>
> @@ -873,7 +871,7 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
>  		if (!mask)
>  			continue;
>
> -		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
> +		flow = masked_flow_lookup(ti, match->key, mask, NULL);
>  		if (flow && ovs_identifier_is_key(&flow->id) &&
>  		    ovs_flow_cmp_unmasked_key(flow, match)) {
>  			return flow;
> -- 
> 2.18.1
Eelco Chaudron Aug. 31, 2020, 7:56 a.m. UTC | #2
On 31 Aug 2020, at 9:50, Eelco Chaudron wrote:

> On 30 Aug 2020, at 23:26, trix@redhat.com wrote:
>
>> From: Tom Rix <trix@redhat.com>
>>
>> clang static analysis flags these problems:
>>
>> flow_table.c:713:2: warning: The expression is an uninitialized
>>   value. The computed value will also be garbage
>>         (*n_mask_hit)++;
>>         ^~~~~~~~~~~~~~~
>> flow_table.c:748:5: warning: The expression is an uninitialized
>>   value. The computed value will also be garbage
>>                                 (*n_cache_hit)++;
>>                                 ^~~~~~~~~~~~~~~~
>>
>> These are not real problems because neither parameter is used
>> by the calling function.
>>
>> Looking at all of the calling functions shows many cases
>> where the results are unused.  Passing unused parameters
>> is wasteful.
>>
>> In the case where the output mask index parameter of flow_lookup()
>> is not used by the caller, it always has a value of 0.
>>
>> To avoid passing unused parameters, rework the
>> masked_flow_lookup() and flow_lookup() routines to check
>> for NULL parameters, and pass NULL for the unused arguments
>> at the call sites.
>>
>> For the mask index parameter, fall back to a local pointer to
>> a value of 0 if the caller passed in NULL.
>
>
> Some of this code is in the fast path, and some of it is not, so
> maybe the original author did this to avoid the NULL checks?
>
> Can you do some performance runs and see if this impacts performance
> in a negative way?

Forgot to add: if you do some performance tests, make sure you have
70+ masks, as that is where you end up doing a lot of NULL checks
versus the one-time use of a local variable; see
ovs_flow_tbl_lookup_stats().
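
To sketch why the mask count matters (a simplified stand-alone model,
not the real table code): on a miss, every mask in the array gets
probed, so the new check runs once per probe:

#include <stdint.h>
#include <stdio.h>

/* Simplified model, loosely following flow_lookup() in flow_table.c:
 * a miss probes every mask, so the 'if (n_mask_hit)' branch added by
 * this patch runs once per mask, i.e. 70+ extra branches per packet
 * with 70+ masks. */
#define N_MASKS 70

static int masked_lookup(uint32_t mask, uint32_t key, uint32_t *n_mask_hit)
{
	if (n_mask_hit)			/* the branch under discussion */
		(*n_mask_hit)++;
	return (key & mask) == mask;	/* stand-in for the bucket walk */
}

int main(void)
{
	uint32_t masks[N_MASKS];
	uint32_t n_mask_hit = 0;
	int i, found = -1;

	for (i = 0; i < N_MASKS; i++)
		masks[i] = UINT32_MAX;	/* no mask matches key 0 below */

	for (i = 0; i < N_MASKS; i++)	/* a miss probes every mask */
		if (masked_lookup(masks[i], 0, &n_mask_hit)) {
			found = i;
			break;
		}

	printf("found=%d, masks probed=%u\n", found, n_mask_hit);
	return 0;
}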

[...]
David Miller Sept. 1, 2020, 8:11 p.m. UTC | #3
From: trix@redhat.com
Date: Sun, 30 Aug 2020 14:26:30 -0700

> Passing unused parameters is wasteful.

Poorly predicted branches are an even bigger waste.

I'm not a big fan of this change and others have asked for performance
analysis to be performed.

So I'm not applying this as-is, sorry.

It's also not great to see that clang can't make use of the caller's
__always_unused annotation to guide these warnings.
Tom Rix Sept. 2, 2020, 6:40 p.m. UTC | #4
On 9/1/20 1:11 PM, David Miller wrote:
> From: trix@redhat.com
> Date: Sun, 30 Aug 2020 14:26:30 -0700
>
>> Passing unused parameters is wasteful.
> Poorly predicted branches are an even bigger waste.
>
> I'm not a big fan of this change and others have asked for performance
> analysis to be performed.
>
> So I'm not applying this as-is, sorry.

No worries.

I think these functions need a larger reworking so that the stats
collection is not in the core functions.
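
Roughly what I have in mind, as a sketch with invented names rather
than a worked-out patch: a single always-inlined core takes a
compile-time stats flag, so the no-stats wrappers get specialized
copies with the accounting branches folded away instead of tested at
run time:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch: 'stats' is a compile-time constant at each
 * call site, so with the core forced inline the compiler emits a
 * stats-free copy for lookup_fast() and a counting copy for
 * lookup_stats(). All names here are invented. */
static inline __attribute__((always_inline))
int lookup_core(const uint32_t *table, int n, uint32_t key,
		uint32_t *n_hit, bool stats)
{
	int i;

	for (i = 0; i < n; i++) {
		if (stats)		/* folded away when stats == false */
			(*n_hit)++;
		if (table[i] == key)
			return i;
	}
	return -1;
}

static int lookup_fast(const uint32_t *table, int n, uint32_t key)
{
	return lookup_core(table, n, key, NULL, false);
}

static int lookup_stats(const uint32_t *table, int n, uint32_t key,
			uint32_t *n_hit)
{
	return lookup_core(table, n, key, n_hit, true);
}

int main(void)
{
	uint32_t table[] = { 3, 7, 42, 9 };
	uint32_t probes = 0;

	printf("fast:  idx=%d\n", lookup_fast(table, 4, 42));
	printf("stats: idx=%d probes=%u\n",
	       lookup_stats(table, 4, 42, &probes), probes);
	return 0;
}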

Thanks for giving it a look,

Tom

>
> It's also not great to see that clang can't make use of the caller's
> __always_unused annotation to guide these warnings.

Patch

diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index e2235849a57e..eac25596e4f4 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -710,7 +710,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
 	hash = flow_hash(&masked_key, &mask->range);
 	head = find_bucket(ti, hash);
-	(*n_mask_hit)++;
+	if (n_mask_hit)
+		(*n_mask_hit)++;
 
 	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
 				lockdep_ovsl_is_held()) {
@@ -730,12 +731,17 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   const struct sw_flow_key *key,
 				   u32 *n_mask_hit,
 				   u32 *n_cache_hit,
-				   u32 *index)
+				   u32 *in_index)
 {
 	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
 	struct sw_flow *flow;
 	struct sw_flow_mask *mask;
 	int i;
+	u32 idx = 0;
+	u32 *index = &idx;
+
+	if (in_index)
+		index = in_index;
 
 	if (likely(*index < ma->max)) {
 		mask = rcu_dereference_ovsl(ma->masks[*index]);
@@ -745,7 +751,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				u64_stats_update_begin(&ma->syncp);
 				usage_counters[*index]++;
 				u64_stats_update_end(&ma->syncp);
-				(*n_cache_hit)++;
+				if (n_cache_hit)
+					(*n_cache_hit)++;
 				return flow;
 			}
 		}
@@ -796,13 +803,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 	*n_mask_hit = 0;
 	*n_cache_hit = 0;
-	if (unlikely(!skb_hash || mc->cache_size == 0)) {
-		u32 mask_index = 0;
-		u32 cache = 0;
-
-		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
-				   &mask_index);
-	}
+	if (unlikely(!skb_hash || mc->cache_size == 0))
+		return flow_lookup(tbl, ti, ma, key, n_mask_hit, NULL,
+				   NULL);
 
 	/* Pre and post recirulation flows usually have the same skb_hash
 	 * value. To avoid hash collisions, rehash the 'skb_hash' with
@@ -849,11 +852,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 {
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
-	u32 __always_unused n_mask_hit;
-	u32 __always_unused n_cache_hit;
-	u32 index = 0;
-
-	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
+	return flow_lookup(tbl, ti, ma, key, NULL, NULL, NULL);
 }
 
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
@@ -865,7 +864,6 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
 	/* Always called under ovs-mutex. */
 	for (i = 0; i < ma->max; i++) {
 		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
-		u32 __always_unused n_mask_hit;
 		struct sw_flow_mask *mask;
 		struct sw_flow *flow;
 
@@ -873,7 +871,7 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
 		if (!mask)
 			continue;
 
-		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
+		flow = masked_flow_lookup(ti, match->key, mask, NULL);
 		if (flow && ovs_identifier_is_key(&flow->id) &&
 		    ovs_flow_cmp_unmasked_key(flow, match)) {
 			return flow;