[v2,3/3] net: Restrict receive packets queuing to housekeeping CPUs

Message ID 20200622234510.240834-4-nitesh@redhat.com
State New
Series Preventing job distribution to isolated CPUs

Commit Message

Nitesh Narayan Lal June 22, 2020, 11:45 p.m. UTC
From: Alex Belits <abelits@marvell.com>

With the existing implementation of store_rps_map(), packets in the receive
path are queued on the backlog queues of the CPUs named in the rps_map,
irrespective of whether those CPUs are isolated. This can add a latency
overhead to any RT workload running on one of those isolated CPUs.

Ensure that store_rps_map() only uses available housekeeping CPUs for
storing the rps_map.

Signed-off-by: Alex Belits <abelits@marvell.com>
Signed-off-by: Nitesh Narayan Lal <nitesh@redhat.com>
---
 net/core/net-sysfs.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
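
For context, a simplified sketch of how the stored rps_map is consumed on the
receive path (a rough paraphrase of the RPS CPU selection in net/core/dev.c;
the helper name and the selection math below are illustrative, not the
kernel's, and assume kernel context such as <linux/netdevice.h>): any CPU
recorded in the map can be picked as the target backlog, which is why an
isolated CPU left in the map can end up with unexpected softirq work.

/*
 * Illustrative sketch only: RPS hashes the flow and picks one of the CPUs
 * recorded in the per-queue rps_map, then the skb is queued on that CPU's
 * backlog and drained later in softirq context. Callers check that the map
 * exists and map->len is non-zero before getting here.
 */
static u16 rps_pick_target_cpu(const struct rps_map *map, u32 flow_hash)
{
	/* struct rps_map (include/linux/netdevice.h) holds len + cpus[] */
	return map->cpus[flow_hash % map->len];	/* may be an isolated CPU */
}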

Comments

Peter Zijlstra June 23, 2020, 9:23 a.m. UTC | #1
On Mon, Jun 22, 2020 at 07:45:10PM -0400, Nitesh Narayan Lal wrote:
> @@ -756,6 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
>  		return err;
>  	}
>  
> +	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
> +	cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
> +	if (cpumask_weight(mask) == 0) {

We have cpumask_empty() for that, which is a much more efficient way of
testing the same.

> +		free_cpumask_var(mask);
> +		return -EINVAL;
> +	}
> +
>  	map = kzalloc(max_t(unsigned int,
>  			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
>  		      GFP_KERNEL);
> -- 
> 2.18.4
>
Nitesh Narayan Lal June 23, 2020, 11:42 a.m. UTC | #2
On 6/23/20 5:23 AM, Peter Zijlstra wrote:
> On Mon, Jun 22, 2020 at 07:45:10PM -0400, Nitesh Narayan Lal wrote:
>> @@ -756,6 +757,13 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
>>  		return err;
>>  	}
>>  
>> +	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
>> +	cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
>> +	if (cpumask_weight(mask) == 0) {
> We have cpumask_empty() for that, which is a much more efficient way of
> testing the same.

Yes, right.
I will make this change.

>
>> +		free_cpumask_var(mask);
>> +		return -EINVAL;
>> +	}
>> +
>>  	map = kzalloc(max_t(unsigned int,
>>  			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
>>  		      GFP_KERNEL);
>> -- 
>> 2.18.4
>>
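
Presumably the follow-up revision replaces the weight test with
cpumask_empty(), which can stop at the first set bit instead of counting all
of them. A sketch of the discussed change (not the posted v3):

	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
	cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
	if (cpumask_empty(mask)) {
		/* no housekeeping CPU left in the requested map */
		free_cpumask_var(mask);
		return -EINVAL;
	}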

Patch

diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e353b822bb15..16e433287191 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -11,6 +11,7 @@ 
 #include <linux/if_arp.h>
 #include <linux/slab.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/isolation.h>
 #include <linux/nsproxy.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
@@ -741,7 +742,7 @@  static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 {
 	struct rps_map *old_map, *map;
 	cpumask_var_t mask;
-	int err, cpu, i;
+	int err, cpu, i, hk_flags;
 	static DEFINE_MUTEX(rps_map_mutex);
 
 	if (!capable(CAP_NET_ADMIN))
@@ -756,6 +757,13 @@  static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 		return err;
 	}
 
+	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+	cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
+	if (cpumask_weight(mask) == 0) {
+		free_cpumask_var(mask);
+		return -EINVAL;
+	}
+
 	map = kzalloc(max_t(unsigned int,
 			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
 		      GFP_KERNEL);
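
From user space, the new check surfaces as write() failing with EINVAL when
the requested mask contains no housekeeping CPUs; otherwise the isolated CPUs
are silently dropped from the map. A minimal sketch (the interface name,
queue, and mask value are made up for illustration):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path and mask: rx queue 0 of eth0, CPUs 2-3 only. */
	int fd = open("/sys/class/net/eth0/queues/rx-0/rps_cpus", O_WRONLY);

	if (fd < 0)
		return 1;
	/*
	 * If CPUs 2-3 are isolated (non-housekeeping), store_rps_map() now
	 * rejects the map and the write fails with EINVAL.
	 */
	if (write(fd, "c", 1) < 0)
		fprintf(stderr, "rps_cpus rejected: %s\n", strerror(errno));
	close(fd);
	return 0;
}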