diff mbox series

[ovs-dev,v1,1/1] net: openvswitch: reduce cpu_used_mask memory

Message ID OS3P286MB2295B54F54AA44E4BD22C458F5D19@OS3P286MB2295.JPNP286.PROD.OUTLOOK.COM
State Superseded
Headers show
Series [ovs-dev,v1,1/1] net: openvswitch: reduce cpu_used_mask memory | expand

Checks

Context Check Description
ovsrobot/apply-robot fail apply and check: fail

Commit Message

缘 陶 Feb. 1, 2023, 5:59 a.m. UTC
From: eddytaoyuan <taoyuan_eddy@hotmail.com>

Signed-off-by: eddytaoyuan <taoyuan_eddy@hotmail.com>
---
 datapath/flow.c       |  6 +++---
 datapath/flow.h       |  2 +-
 datapath/flow_table.c | 28 +++++++++++++++++++++++++---
 3 files changed, 29 insertions(+), 7 deletions(-)
diff mbox series

Patch

diff --git a/datapath/flow.c b/datapath/flow.c
index 5a00c238c..ef2bd30c8 100644
--- a/datapath/flow.c
+++ b/datapath/flow.c
@@ -118,7 +118,7 @@  void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 
 					rcu_assign_pointer(flow->stats[cpu],
 							   new_stats);
-					cpumask_set_cpu(cpu, &flow->cpu_used_mask);
+					cpumask_set_cpu(cpu, flow->cpu_used_mask);
 					goto unlock;
 				}
 			}
@@ -146,7 +146,7 @@  void ovs_flow_stats_get(const struct sw_flow *flow,
 	memset(ovs_stats, 0, sizeof(*ovs_stats));
 
 	/* We open code this to make sure cpu 0 is always considered */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
 		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
 
 		if (stats) {
@@ -170,7 +170,7 @@  void ovs_flow_stats_clear(struct sw_flow *flow)
 	int cpu;
 
 	/* We open code this to make sure cpu 0 is always considered */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
 		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
 
 		if (stats) {
diff --git a/datapath/flow.h b/datapath/flow.h
index 584d9f565..2383155cc 100644
--- a/datapath/flow.h
+++ b/datapath/flow.h
@@ -227,7 +227,7 @@  struct sw_flow {
 					 */
 	struct sw_flow_key key;
 	struct sw_flow_id id;
-	struct cpumask cpu_used_mask;
+	struct cpumask *cpu_used_mask;
 	struct sw_flow_mask *mask;
 	struct sw_flow_actions __rcu *sf_acts;
 	struct sw_flow_stats __rcu *stats[]; /* One for each CPU.  First one
diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index 650338fb0..0b2a8e905 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -100,11 +100,12 @@  struct sw_flow *ovs_flow_alloc(void)
 	if (!stats)
 		goto err;
 
+	flow->cpu_used_mask = (struct cpumask *)&(flow->stats[nr_cpu_ids]);
 	spin_lock_init(&stats->lock);
 
 	RCU_INIT_POINTER(flow->stats[0], stats);
 
-	cpumask_set_cpu(0, &flow->cpu_used_mask);
+	cpumask_set_cpu(0, flow->cpu_used_mask);
 
 	return flow;
 err:
@@ -126,7 +127,7 @@  static void flow_free(struct sw_flow *flow)
 	if (flow->sf_acts)
 		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
 	/* We open code this to make sure cpu 0 is always considered */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
+	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, flow->cpu_used_mask))
 		if (flow->stats[cpu])
 			kmem_cache_free(flow_stats_cache,
 					rcu_dereference_raw(flow->stats[cpu]));
@@ -961,9 +962,30 @@  int ovs_flow_init(void)
 	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
 	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
 
+        /* Directly defining 'struct cpumask' in 'struct sw_flow' has
+         * drawbacks:
+         * *It takes memory unnecessarily (by default 1000 bytes or so)
+         *  The reason is that compilation option CONFIG_NR_CPUS decides
+         *  the value of NR_CPUS, which in turn decides size of
+         *  'struct cpumask', while in practice we should be
+         *  using the local machine's CPU count instead.
+         * *Flow creation needs cycles to initialize them
+         * *Flow deletion/get/clear needs cycles to iterate through them
+         *
+         * To address this, cpu_used_mask uses a size based on the
+         * local machine's real CPU count instead of NR_CPUS.
+         *
+         * 'struct sw_flow' already has one FAM (Flexible Array Member),
+         * 'stats', at the tail; a C structure does not allow more than
+         * one FAM, so another cannot be added to sw_flow.
+         *
+         * The final solution is to extend the stats allocation to
+         * make room for cpu_used_mask at the end.
+         * This is why 'cpumask_size()' appears below.
+         */
 	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
 				       + (nr_cpu_ids
-					  * sizeof(struct sw_flow_stats *)),
+					  * sizeof(struct sw_flow_stats *)) + cpumask_size(),
 				       0, 0, NULL);
 	if (flow_cache == NULL)
 		return -ENOMEM;