@@ -3237,4 +3237,5 @@ struct rr_numa {
    int cur_index;
+    bool idx_inc;
};
@@ -3275,4 +3276,7 @@ rr_numa_list_populate(struct dp_netdev *dp, struct rr_numa_list *rr)
        numa->pmds = xrealloc(numa->pmds, numa->n_pmds * sizeof *numa->pmds);
        numa->pmds[numa->n_pmds - 1] = pmd;
+        /* At least one pmd, so initialise cur_index and idx_inc. */
+        numa->cur_index = 0;
+        numa->idx_inc = true;
    }
}
@@ -3281,5 +3285,20 @@ static struct dp_netdev_pmd_thread *
rr_numa_get_pmd(struct rr_numa *numa)
{
-    return numa->pmds[numa->cur_index++ % numa->n_pmds];
+    int numa_idx = numa->cur_index;
+
+    if (numa->idx_inc) {
+        if (numa->cur_index == numa->n_pmds - 1) {
+            numa->idx_inc = false;
+        } else {
+            numa->cur_index++;
+        }
+    } else {
+        if (numa->cur_index == 0) {
+            numa->idx_inc = true;
+        } else {
+            numa->cur_index--;
+        }
+    }
+    return numa->pmds[numa_idx];
}
@@ -54,5 +54,5 @@ m4_define([CHECK_PMD_THREADS_CREATED], [
m4_define([SED_NUMA_CORE_PATTERN], ["s/\(numa_id \)[[0-9]]*\( core_id \)[[0-9]]*:/\1<cleared>\2<cleared>:/"])
-m4_define([SED_NUMA_CORE_QUEUE_PATTERN], ["s/\(numa_id \)[[0-9]]*\( core_id \)[[0-9]]*:/\1<cleared>\2<cleared>:/;s/\(queue-id: \)\(0 2 4 6\|1 3 5 7\)/\1<cleared>/"])
+m4_define([SED_NUMA_CORE_QUEUE_PATTERN], ["s/\(numa_id \)[[0-9]]*\( core_id \)[[0-9]]*:/\1<cleared>\2<cleared>:/;s/\(queue-id: \)\(1 2 5 6\|0 3 4 7\)/\1<cleared>/"])
m4_define([DUMMY_NUMA], [--dummy-numa="0,0,0,0"])
Up to this point rxqs are sorted by the processing cycles they consumed and assigned to pmds in a round-robin manner. Ian pointed out that on wraparound the most loaded pmd will be the next one to be assigned an additional rxq, and that it would be better to reverse the pmd order when wraparound occurs. In other words, change from round-robin assignment to assigning in a forward and reverse cycle through the pmds.

Suggested-by: Ian Stokes <ian.stokes@intel.com>
Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
---
 lib/dpif-netdev.c | 21 ++++++++++++++++++++-
 tests/pmd.at      |  2 +-
 2 files changed, 21 insertions(+), 2 deletions(-)
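
As a side note, the following standalone C sketch (not part of the patch) mirrors the forward/reverse cycle in rr_numa_get_pmd() and prints the resulting assignment order. The struct name "cycle", the helper next_pmd(), and the 2-pmd/8-rxq setup are illustrative assumptions only, chosen to match the distribution checked by the updated pmd.at pattern.

/*
 * Standalone sketch: mirrors the forward/reverse cycle used by
 * rr_numa_get_pmd() in the patch above.
 */
#include <stdbool.h>
#include <stdio.h>

struct cycle {
    int n_pmds;     /* Number of pmds on the NUMA node. */
    int cur_index;  /* Pmd index to hand out next. */
    bool idx_inc;   /* true = walking forward, false = walking backward. */
};

/* Return the current pmd index, then step forward or backward,
 * reversing direction at either end of the pmd array. */
static int
next_pmd(struct cycle *c)
{
    int idx = c->cur_index;

    if (c->idx_inc) {
        if (c->cur_index == c->n_pmds - 1) {
            c->idx_inc = false;
        } else {
            c->cur_index++;
        }
    } else {
        if (c->cur_index == 0) {
            c->idx_inc = true;
        } else {
            c->cur_index--;
        }
    }
    return idx;
}

int
main(void)
{
    struct cycle c = { .n_pmds = 2, .cur_index = 0, .idx_inc = true };

    /* Assuming eight rxqs are handed out in queue-id order, as in the
     * pmd.at test where all rxqs carry equal load. */
    for (int rxq = 0; rxq < 8; rxq++) {
        printf("rxq %d -> pmd %d\n", rxq, next_pmd(&c));
    }
    return 0;
}

With two pmds the printed order is 0 1 1 0 0 1 1 0, so one pmd ends up with queue-ids 0 3 4 7 and the other with 1 2 5 6, which is the split the updated SED_NUMA_CORE_QUEUE_PATTERN checks for; plain round robin would instead alternate 0 1 0 1 0 1 0 1.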