Netdev Archive on lore.kernel.org
help / color / mirror / Atom feed
* [PATCH v2] net: openvswitch: pass NULL for unused parameters
@ 2020-08-30 21:26 trix
  2020-08-31  7:50 ` Eelco Chaudron
  2020-09-01 20:11 ` David Miller
  0 siblings, 2 replies; 5+ messages in thread
From: trix @ 2020-08-30 21:26 UTC (permalink / raw)
  To: pshelar, davem, kuba, natechancellor, ndesaulniers
  Cc: netdev, dev, linux-kernel, Tom Rix

From: Tom Rix <trix@redhat.com>

clang static analysis flags these problems

flow_table.c:713:2: warning: The expression is an uninitialized
  value. The computed value will also be garbage
        (*n_mask_hit)++;
        ^~~~~~~~~~~~~~~
flow_table.c:748:5: warning: The expression is an uninitialized
  value. The computed value will also be garbage
                                (*n_cache_hit)++;
                                ^~~~~~~~~~~~~~~~

These are not problems because neither parameter is used
by the calling function.

Looking at all of the calling functions, there are many
cases where the results are unused.  Passing unused
parameters is a waste.

In the case where the output mask index parameter of flow_lookup()
is not used by the caller, it always has a value of 0.

To avoid passing unused parameters, rework the
masked_flow_lookup() and flow_lookup() routines to check
for NULL parameters and change the unused parameters to NULL.

For the mask index parameter, use a local pointer to a value of
0 if the user passed in NULL.

Signed-off-by: Tom Rix <trix@redhat.com>
---
v2
- fix spelling
- add mask index to NULL parameters
---
net/openvswitch/flow_table.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index e2235849a57e..eac25596e4f4 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -710,7 +710,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
 	hash = flow_hash(&masked_key, &mask->range);
 	head = find_bucket(ti, hash);
-	(*n_mask_hit)++;
+	if (n_mask_hit)
+		(*n_mask_hit)++;
 
 	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
 				lockdep_ovsl_is_held()) {
@@ -730,12 +731,17 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   const struct sw_flow_key *key,
 				   u32 *n_mask_hit,
 				   u32 *n_cache_hit,
-				   u32 *index)
+				   u32 *in_index)
 {
 	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
 	struct sw_flow *flow;
 	struct sw_flow_mask *mask;
 	int i;
+	u32 idx = 0;
+	u32 *index = &idx;
+
+	if (in_index)
+		index = in_index;
 
 	if (likely(*index < ma->max)) {
 		mask = rcu_dereference_ovsl(ma->masks[*index]);
@@ -745,7 +751,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				u64_stats_update_begin(&ma->syncp);
 				usage_counters[*index]++;
 				u64_stats_update_end(&ma->syncp);
-				(*n_cache_hit)++;
+				if (n_cache_hit)
+					(*n_cache_hit)++;
 				return flow;
 			}
 		}
@@ -796,13 +803,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 	*n_mask_hit = 0;
 	*n_cache_hit = 0;
-	if (unlikely(!skb_hash || mc->cache_size == 0)) {
-		u32 mask_index = 0;
-		u32 cache = 0;
-
-		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
-				   &mask_index);
-	}
+	if (unlikely(!skb_hash || mc->cache_size == 0))
+		return flow_lookup(tbl, ti, ma, key, n_mask_hit, NULL,
+				   NULL);
 
 	/* Pre and post recirulation flows usually have the same skb_hash
 	 * value. To avoid hash collisions, rehash the 'skb_hash' with
@@ -849,11 +852,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 {
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
-	u32 __always_unused n_mask_hit;
-	u32 __always_unused n_cache_hit;
-	u32 index = 0;
-
-	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
+	return flow_lookup(tbl, ti, ma, key, NULL, NULL, NULL);
 }
 
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
@@ -865,7 +864,6 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
 	/* Always called under ovs-mutex. */
 	for (i = 0; i < ma->max; i++) {
 		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
-		u32 __always_unused n_mask_hit;
 		struct sw_flow_mask *mask;
 		struct sw_flow *flow;
 
@@ -873,7 +871,7 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
 		if (!mask)
 			continue;
 
-		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
+		flow = masked_flow_lookup(ti, match->key, mask, NULL);
 		if (flow && ovs_identifier_is_key(&flow->id) &&
 		    ovs_flow_cmp_unmasked_key(flow, match)) {
 			return flow;
-- 
2.18.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] net: openvswitch: pass NULL for unused parameters
  2020-08-30 21:26 [PATCH v2] net: openvswitch: pass NULL for unused parameters trix
@ 2020-08-31  7:50 ` Eelco Chaudron
  2020-08-31  7:56   ` Eelco Chaudron
  2020-09-01 20:11 ` David Miller
  1 sibling, 1 reply; 5+ messages in thread
From: Eelco Chaudron @ 2020-08-31  7:50 UTC (permalink / raw)
  To: trix
  Cc: pshelar, davem, kuba, natechancellor, ndesaulniers, netdev, dev,
	linux-kernel



On 30 Aug 2020, at 23:26, trix@redhat.com wrote:

> From: Tom Rix <trix@redhat.com>
>
> clang static analysis flags these problems
>
> flow_table.c:713:2: warning: The expression is an uninitialized
>   value. The computed value will also be garbage
>         (*n_mask_hit)++;
>         ^~~~~~~~~~~~~~~
> flow_table.c:748:5: warning: The expression is an uninitialized
>   value. The computed value will also be garbage
>                                 (*n_cache_hit)++;
>                                 ^~~~~~~~~~~~~~~~
>
> These are not problems because neither parameter is used
> by the calling function.
>
> Looking at all of the calling functions, there are many
> cases where the results are unused.  Passing unused
> parameters is a waste.
>
> In the case where the output mask index parameter of flow_lookup()
> is not used by the caller, it always has a value of 0.
>
> To avoid passing unused parameters, rework the
> masked_flow_lookup() and flow_lookup() routines to check
> for NULL parameters and change the unused parameters to NULL.
>
> For the mask index parameter, use a local pointer to a value of
> 0 if user passed in NULL.


Some of this code is fast path, and some of it is not. So maybe the 
original author did this to avoid the null checks?

Can you do some performance runs and see if it impacts the performance 
in a negative way?

> Signed-off-by: Tom Rix <trix@redhat.com>
> ---
> v2
> - fix spelling
> - add mask index to NULL parameters
> ---
> net/openvswitch/flow_table.c | 32 +++++++++++++++-----------------
>  1 file changed, 15 insertions(+), 17 deletions(-)
>
> diff --git a/net/openvswitch/flow_table.c 
> b/net/openvswitch/flow_table.c
> index e2235849a57e..eac25596e4f4 100644
> --- a/net/openvswitch/flow_table.c
> +++ b/net/openvswitch/flow_table.c
> @@ -710,7 +710,8 @@ static struct sw_flow *masked_flow_lookup(struct 
> table_instance *ti,
>  	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
>  	hash = flow_hash(&masked_key, &mask->range);
>  	head = find_bucket(ti, hash);
> -	(*n_mask_hit)++;
> +	if (n_mask_hit)
> +		(*n_mask_hit)++;
>
>  	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
>  				lockdep_ovsl_is_held()) {
> @@ -730,12 +731,17 @@ static struct sw_flow *flow_lookup(struct 
> flow_table *tbl,
>  				   const struct sw_flow_key *key,
>  				   u32 *n_mask_hit,
>  				   u32 *n_cache_hit,
> -				   u32 *index)
> +				   u32 *in_index)
>  {
>  	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
>  	struct sw_flow *flow;
>  	struct sw_flow_mask *mask;
>  	int i;
> +	u32 idx = 0;
> +	u32 *index = &idx;
> +
> +	if (in_index)
> +		index = in_index;
>
>  	if (likely(*index < ma->max)) {
>  		mask = rcu_dereference_ovsl(ma->masks[*index]);
> @@ -745,7 +751,8 @@ static struct sw_flow *flow_lookup(struct 
> flow_table *tbl,
>  				u64_stats_update_begin(&ma->syncp);
>  				usage_counters[*index]++;
>  				u64_stats_update_end(&ma->syncp);
> -				(*n_cache_hit)++;
> +				if (n_cache_hit)
> +					(*n_cache_hit)++;
>  				return flow;
>  			}
>  		}
> @@ -796,13 +803,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct 
> flow_table *tbl,
>
>  	*n_mask_hit = 0;
>  	*n_cache_hit = 0;
> -	if (unlikely(!skb_hash || mc->cache_size == 0)) {
> -		u32 mask_index = 0;
> -		u32 cache = 0;
> -
> -		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
> -				   &mask_index);
> -	}
> +	if (unlikely(!skb_hash || mc->cache_size == 0))
> +		return flow_lookup(tbl, ti, ma, key, n_mask_hit, NULL,
> +				   NULL);
>
>  	/* Pre and post recirulation flows usually have the same skb_hash
>  	 * value. To avoid hash collisions, rehash the 'skb_hash' with
> @@ -849,11 +852,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct 
> flow_table *tbl,
>  {
>  	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
>  	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
> -	u32 __always_unused n_mask_hit;
> -	u32 __always_unused n_cache_hit;
> -	u32 index = 0;
> -
> -	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, 
> &index);
> +	return flow_lookup(tbl, ti, ma, key, NULL, NULL, NULL);
>  }
>
>  struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
> @@ -865,7 +864,6 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct 
> flow_table *tbl,
>  	/* Always called under ovs-mutex. */
>  	for (i = 0; i < ma->max; i++) {
>  		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
> -		u32 __always_unused n_mask_hit;
>  		struct sw_flow_mask *mask;
>  		struct sw_flow *flow;
>
> @@ -873,7 +871,7 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct 
> flow_table *tbl,
>  		if (!mask)
>  			continue;
>
> -		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
> +		flow = masked_flow_lookup(ti, match->key, mask, NULL);
>  		if (flow && ovs_identifier_is_key(&flow->id) &&
>  		    ovs_flow_cmp_unmasked_key(flow, match)) {
>  			return flow;
> -- 
> 2.18.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] net: openvswitch: pass NULL for unused parameters
  2020-08-31  7:50 ` Eelco Chaudron
@ 2020-08-31  7:56   ` Eelco Chaudron
  0 siblings, 0 replies; 5+ messages in thread
From: Eelco Chaudron @ 2020-08-31  7:56 UTC (permalink / raw)
  To: trix
  Cc: pshelar, davem, kuba, natechancellor, ndesaulniers, netdev, dev,
	linux-kernel



On 31 Aug 2020, at 9:50, Eelco Chaudron wrote:

> On 30 Aug 2020, at 23:26, trix@redhat.com wrote:
>
>> From: Tom Rix <trix@redhat.com>
>>
>> clang static analysis flags these problems
>>
>> flow_table.c:713:2: warning: The expression is an uninitialized
>>   value. The computed value will also be garbage
>>         (*n_mask_hit)++;
>>         ^~~~~~~~~~~~~~~
>> flow_table.c:748:5: warning: The expression is an uninitialized
>>   value. The computed value will also be garbage
>>                                 (*n_cache_hit)++;
>>                                 ^~~~~~~~~~~~~~~~
>>
>> These are not problems because neither parameter is used
>> by the calling function.
>>
>> Looking at all of the calling functions, there are many
>> cases where the results are unused.  Passing unused
>> parameters is a waste.
>>
>> In the case where the output mask index parameter of flow_lookup()
>> is not used by the caller, it always has a value of 0.
>>
>> To avoid passing unused parameters, rework the
>> masked_flow_lookup() and flow_lookup() routines to check
>> for NULL parameters and change the unused parameters to NULL.
>>
>> For the mask index parameter, use a local pointer to a value of
>> 0 if user passed in NULL.
>
>
> Some of this code is fast path, and some of it is not. So maybe the 
> original author did this to avoid the null checks?
>
> Can you do some performance runs and see if it impacts the performance 
> in a negative way?

Forgot to add that if you do some performance tests, make sure you have 
some 70+ masks as this is where you do a lot of null checks vs only 
one-time use of a variable, see ovs_flow_tbl_lookup_stats().

>> Signed-off-by: Tom Rix <trix@redhat.com>
>> ---
>> v2
>> - fix spelling
>> - add mask index to NULL parameters
>> ---
>> net/openvswitch/flow_table.c | 32 +++++++++++++++-----------------
>>  1 file changed, 15 insertions(+), 17 deletions(-)
>>
>> diff --git a/net/openvswitch/flow_table.c 
>> b/net/openvswitch/flow_table.c
>> index e2235849a57e..eac25596e4f4 100644
>> --- a/net/openvswitch/flow_table.c
>> +++ b/net/openvswitch/flow_table.c
>> @@ -710,7 +710,8 @@ static struct sw_flow *masked_flow_lookup(struct 
>> table_instance *ti,
>>  	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
>>  	hash = flow_hash(&masked_key, &mask->range);
>>  	head = find_bucket(ti, hash);
>> -	(*n_mask_hit)++;
>> +	if (n_mask_hit)
>> +		(*n_mask_hit)++;
>>
>>  	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
>>  				lockdep_ovsl_is_held()) {
>> @@ -730,12 +731,17 @@ static struct sw_flow *flow_lookup(struct 
>> flow_table *tbl,
>>  				   const struct sw_flow_key *key,
>>  				   u32 *n_mask_hit,
>>  				   u32 *n_cache_hit,
>> -				   u32 *index)
>> +				   u32 *in_index)
>>  {
>>  	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
>>  	struct sw_flow *flow;
>>  	struct sw_flow_mask *mask;
>>  	int i;
>> +	u32 idx = 0;
>> +	u32 *index = &idx;
>> +
>> +	if (in_index)
>> +		index = in_index;
>>
>>  	if (likely(*index < ma->max)) {
>>  		mask = rcu_dereference_ovsl(ma->masks[*index]);
>> @@ -745,7 +751,8 @@ static struct sw_flow *flow_lookup(struct 
>> flow_table *tbl,
>>  				u64_stats_update_begin(&ma->syncp);
>>  				usage_counters[*index]++;
>>  				u64_stats_update_end(&ma->syncp);
>> -				(*n_cache_hit)++;
>> +				if (n_cache_hit)
>> +					(*n_cache_hit)++;
>>  				return flow;
>>  			}
>>  		}
>> @@ -796,13 +803,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct 
>> flow_table *tbl,
>>
>>  	*n_mask_hit = 0;
>>  	*n_cache_hit = 0;
>> -	if (unlikely(!skb_hash || mc->cache_size == 0)) {
>> -		u32 mask_index = 0;
>> -		u32 cache = 0;
>> -
>> -		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
>> -				   &mask_index);
>> -	}
>> +	if (unlikely(!skb_hash || mc->cache_size == 0))
>> +		return flow_lookup(tbl, ti, ma, key, n_mask_hit, NULL,
>> +				   NULL);
>>
>>  	/* Pre and post recirulation flows usually have the same skb_hash
>>  	 * value. To avoid hash collisions, rehash the 'skb_hash' with
>> @@ -849,11 +852,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct 
>> flow_table *tbl,
>>  {
>>  	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
>>  	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
>> -	u32 __always_unused n_mask_hit;
>> -	u32 __always_unused n_cache_hit;
>> -	u32 index = 0;
>> -
>> -	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, 
>> &index);
>> +	return flow_lookup(tbl, ti, ma, key, NULL, NULL, NULL);
>>  }
>>
>>  struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
>> @@ -865,7 +864,6 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct 
>> flow_table *tbl,
>>  	/* Always called under ovs-mutex. */
>>  	for (i = 0; i < ma->max; i++) {
>>  		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
>> -		u32 __always_unused n_mask_hit;
>>  		struct sw_flow_mask *mask;
>>  		struct sw_flow *flow;
>>
>> @@ -873,7 +871,7 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct 
>> flow_table *tbl,
>>  		if (!mask)
>>  			continue;
>>
>> -		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
>> +		flow = masked_flow_lookup(ti, match->key, mask, NULL);
>>  		if (flow && ovs_identifier_is_key(&flow->id) &&
>>  		    ovs_flow_cmp_unmasked_key(flow, match)) {
>>  			return flow;
>> -- 
>> 2.18.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] net: openvswitch: pass NULL for unused parameters
  2020-08-30 21:26 [PATCH v2] net: openvswitch: pass NULL for unused parameters trix
  2020-08-31  7:50 ` Eelco Chaudron
@ 2020-09-01 20:11 ` David Miller
  2020-09-02 18:40   ` Tom Rix
  1 sibling, 1 reply; 5+ messages in thread
From: David Miller @ 2020-09-01 20:11 UTC (permalink / raw)
  To: trix
  Cc: pshelar, kuba, natechancellor, ndesaulniers, netdev, dev, linux-kernel

From: trix@redhat.com
Date: Sun, 30 Aug 2020 14:26:30 -0700

> Passing unused parameters is a waste.

Poorly predicted branches are an even bigger waste.

I'm not a big fan of this change and others have asked for performance
analysis to be performed.

So I'm not applying this as-is, sorry.

It's also not great to see that CLANG can't make use of the caller's
__always_unused directive to guide these warnings.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH v2] net: openvswitch: pass NULL for unused parameters
  2020-09-01 20:11 ` David Miller
@ 2020-09-02 18:40   ` Tom Rix
  0 siblings, 0 replies; 5+ messages in thread
From: Tom Rix @ 2020-09-02 18:40 UTC (permalink / raw)
  To: David Miller
  Cc: pshelar, kuba, natechancellor, ndesaulniers, netdev, dev, linux-kernel


On 9/1/20 1:11 PM, David Miller wrote:
> From: trix@redhat.com
> Date: Sun, 30 Aug 2020 14:26:30 -0700
>
>> Passing unused parameters is a waste.
> Poorly predicted branches are an even bigger waste.
>
> I'm not a big fan of this change and others have asked for performance
> analysis to be performed.
>
> So I'm not applying this as-is, sorry.

no worries.

I think these functions need a larger working over so the stats collection is not in the core functions.

Thanks for giving it a look,

Tom

>
> It's also not great to see that CLANG can't make use of the caller's
> __always_unused directive to guide these warnings.


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2020-09-02 18:40 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-08-30 21:26 [PATCH v2] net: openvswitch: pass NULL for unused parameters trix
2020-08-31  7:50 ` Eelco Chaudron
2020-08-31  7:56   ` Eelco Chaudron
2020-09-01 20:11 ` David Miller
2020-09-02 18:40   ` Tom Rix

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).