@@ -118,7 +118,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
rcu_assign_pointer(flow->stats[cpu],
new_stats);
- cpumask_set_cpu(cpu, &flow->cpu_used_mask);
+ cpumask_set_cpu(cpu, flow->cpu_used_mask);
goto unlock;
}
}
@@ -146,7 +146,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
memset(ovs_stats, 0, sizeof(*ovs_stats));
/* We open code this to make sure cpu 0 is always considered */
- for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
if (stats) {
@@ -170,7 +170,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
int cpu;
/* We open code this to make sure cpu 0 is always considered */
- for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
if (stats) {
@@ -227,7 +227,7 @@ struct sw_flow {
*/
struct sw_flow_key key;
struct sw_flow_id id;
- struct cpumask cpu_used_mask;
+ struct cpumask *cpu_used_mask;
struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one
@@ -100,11 +100,12 @@ struct sw_flow *ovs_flow_alloc(void)
if (!stats)
goto err;
+ flow->cpu_used_mask = (struct cpumask *)&(flow->stats[nr_cpu_ids]);
spin_lock_init(&stats->lock);
RCU_INIT_POINTER(flow->stats[0], stats);
- cpumask_set_cpu(0, &flow->cpu_used_mask);
+ cpumask_set_cpu(0, flow->cpu_used_mask);
return flow;
err:
@@ -126,7 +127,7 @@ static void flow_free(struct sw_flow *flow)
if (flow->sf_acts)
ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
/* We open code this to make sure cpu 0 is always considered */
- for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
+ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, flow->cpu_used_mask))
if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
rcu_dereference_raw(flow->stats[cpu]));
@@ -961,9 +962,30 @@ int ovs_flow_init(void)
BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
+ /* Directly defining 'struct cpumask' in 'struct sw_flow' has
+ * drawbacks:
+ * *It takes memory unnecessarily (by default 1000 bytes or so)
+ * The reason is that compilation option CONFIG_NR_CPUS decides
+ * the value of NR_CPUS, which in turn decides size of
+ * 'struct cpumask', while in practice we should be
+ * using the local machine's CPU count instead.
+ * *Flow creation needs cycles to initialize them
+ * *Flow deletion/get/clear needs cycles to iterate through them
+ *
+ * To address this, cpu_used_mask is sized for the local
+ * machine's real CPU count instead of NR_CPUS.
+ *
+ * 'struct sw_flow' already has one FAM (Flexible Array Member),
+ * 'stats', at the tail, and a C structure does not allow a
+ * second FAM in sw_flow.
+ *
+ * The final solution is to extend the stats allocation to make
+ * room for cpu_used_mask at the end.
+ * This is why 'cpumask_size()' appears below.
+ */
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+ (nr_cpu_ids
- * sizeof(struct sw_flow_stats *)),
+ * sizeof(struct sw_flow_stats *)) + cpumask_size(),
0, 0, NULL);
if (flow_cache == NULL)
return -ENOMEM;