The "ovs-appctl dpif-netdev/pmd-rxq-show" and "ovs-appctl
dpif-netdev/pmd-stats-show" commands show their output per core_id,
ordered by where each pmd thread happens to sit in the poll_threads
hash map. My OCD was kicking in when using these commands, hence this
change to display them in natural core_id order.

In addition, I had to change a test case that would fail if the cores
were not in order in the hash map, because OVS assigns queues to cores
based on their order in the hash map. The test case now only checks
that each core is assigned one of the expected queue sets.

Manually tested this on my setup, and ran clang-analyze.

Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
---
 lib/dpif-netdev.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 tests/pmd.at      |  7 ++++---
 2 files changed, 62 insertions(+), 4 deletions(-)

--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -922,13 +922,62 @@ pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
     }
 }
 
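+/* qsort comparator that orders pmd threads by ascending core_id. */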
+static int
+compare_poll_thread_list(const void *a_, const void *b_)
+{
+    const struct dp_netdev_pmd_thread *a, *b;
+
+    a = *(struct dp_netdev_pmd_thread **)a_;
+    b = *(struct dp_netdev_pmd_thread **)b_;
+
+    if (a->core_id < b->core_id) {
+        return -1;
+    }
+    if (a->core_id > b->core_id) {
+        return 1;
+    }
+    return 0;
+}
+
+/* Creates a sorted list of pmds from the dp->poll_threads cmap. The list
+ * can be used as long as we do not go to quiescent state. */
+static void
+sorted_poll_thread_list(struct dp_netdev *dp,
+                        struct dp_netdev_pmd_thread ***list,
+                        size_t *n)
+{
+    struct dp_netdev_pmd_thread *pmd;
+    struct dp_netdev_pmd_thread **pmd_list;
+    size_t k = 0, n_pmds;
+
+    n_pmds = cmap_count(&dp->poll_threads);
+    pmd_list = xcalloc(n_pmds, sizeof *pmd_list);
+
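+    /* cmap_count() above is only a snapshot; new pmd threads may be
+     * inserted while we iterate, so guard against overrunning the array. */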
+    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+        if (k >= n_pmds) {
+            break;
+        }
+        pmd_list[k++] = pmd;
+    }
+
+    qsort(pmd_list, k, sizeof *pmd_list, compare_poll_thread_list);
+
+    *list = pmd_list;
+    *n = k;
+}
+
 static void
 dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
                      void *aux)
 {
     struct ds reply = DS_EMPTY_INITIALIZER;
     struct dp_netdev_pmd_thread *pmd;
+    struct dp_netdev_pmd_thread **pmd_list;
     struct dp_netdev *dp = NULL;
+    size_t i, n;
     enum pmd_info_type type = *(enum pmd_info_type *) aux;
 
     ovs_mutex_lock(&dp_netdev_mutex);
@@ -947,7 +996,14 @@ dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
         return;
     }
 
-    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
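+    /* Take a core_id-sorted snapshot of the pmd threads for display. */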
+    sorted_poll_thread_list(dp, &pmd_list, &n);
+    for (i = 0; i < n; i++) {
+        pmd = pmd_list[i];
+        if (!pmd) {
+            break;
+        }
+
         if (type == PMD_INFO_SHOW_RXQ) {
             pmd_info_show_rxq(&reply, pmd);
         } else {
@@ -970,6 +1026,7 @@ dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
             }
         }
     }
+    free(pmd_list);
 
     ovs_mutex_unlock(&dp_netdev_mutex);
 
--- a/tests/pmd.at
+++ b/tests/pmd.at
@@ -53,6 +53,7 @@ m4_define([CHECK_PMD_THREADS_CREATED], [
 ])
 
 m4_define([SED_NUMA_CORE_PATTERN], ["s/\(numa_id \)[[0-9]]*\( core_id \)[[0-9]]*:/\1<cleared>\2<cleared>:/"])
+m4_define([SED_NUMA_CORE_QUEUE_PATTERN], ["s/\(numa_id \)[[0-9]]*\( core_id \)[[0-9]]*:/\1<cleared>\2<cleared>:/;s/\(queue-id: \)\(0 2 4 6\|1 3 5 7\)/\1<cleared>/"])
 m4_define([DUMMY_NUMA], [--dummy-numa="0,0,0,0"])
 
 AT_SETUP([PMD - creating a thread/add-port])
@@ -126,13 +127,13 @@ TMP=$(cat ovs-vswitchd.log | wc -l | tr -d [[:blank:]])
 AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x3])
 CHECK_PMD_THREADS_CREATED([2], [], [+$TMP])
 
-AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_PATTERN], [0], [dnl
+AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_QUEUE_PATTERN], [0], [dnl
 pmd thread numa_id <cleared> core_id <cleared>:
 	isolated : false
-	port: p0 queue-id: 0 2 4 6
+	port: p0 queue-id: <cleared>
 pmd thread numa_id <cleared> core_id <cleared>:
 	isolated : false
-	port: p0 queue-id: 1 3 5 7
+	port: p0 queue-id: <cleared>
 ])
 
 TMP=$(cat ovs-vswitchd.log | wc -l | tr -d [[:blank:]])
The "ovs-appctl dpif-netdev/pmd-rxq-show" and "ovs-appctl dpif-netdev/pmd-stats-show" commands show their output per core_id, sorted on the hash location. My OCD was kicking in when using these commands, hence this change to display them in natural core_id order. In addition I had to change a test case that would fail if the cores where not in order in the hash list. This is due to OVS assigning queues to cores based on the order in the hash list. The test case now checks if any core has the set of queues in the given order. Manually tested this on my setup, and ran clang-analyze. Signed-off-by: Eelco Chaudron <echaudro@redhat.com> --- lib/dpif-netdev.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- tests/pmd.at | 7 ++++--- 2 files changed, 58 insertions(+), 4 deletions(-)