@@ -140,4 +140,16 @@ Core 7: Q4 (70%) | Q5 (10%)
core 8: Q3 (60%) | Q0 (30%)
+To see the current measured usage history of pmd core cycles for each rxq::
+
+ $ ovs-appctl dpif-netdev/pmd-rxq-show
+
+.. note::
+
+ A history of one minute is recorded and shown for each rxq to allow for
+ traffic pattern spikes. Any changes in an rxq's pmd core cycles usage, due
+ to traffic pattern or reconfig changes, will take one minute before they
+ are fully reflected in the stats. In this way the stats show what would be
+ used during a new rxq to pmd assignment.
+
Rxq to pmds assignment takes place whenever there are configuration changes
or can be triggered by using::
@@ -19,4 +19,5 @@ Post-v2.8.0
* Add support for DPDK v17.11
* Add support for vHost IOMMU
+ * Add rxq use of pmd core statistics to pmd-rxq-show
v2.8.0 - 31 Aug 2017
@@ -946,7 +946,8 @@ pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
{
if (pmd->core_id != NON_PMD_CORE_ID) {
- const char *prev_name = NULL;
struct rxq_poll *list;
size_t i, n;
+ uint64_t *rxq_intrvl_sum;
+ uint64_t pmd_rxq_idle, pmd_rxq_proc;
ds_put_format(reply,
@@ -957,19 +958,39 @@ pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
ovs_mutex_lock(&pmd->port_mutex);
sorted_poll_list(pmd, &list, &n);
+ rxq_intrvl_sum = xcalloc(n, sizeof(uint64_t));
+
+ /* Calculate the cycles for each queue and sum for pmd
+ * to be able to calculate the core percentage later. */
+ pmd_rxq_idle = pmd_rxq_proc = 0;
+ for (i = 0; i < n; i++) {
+ for (int j = 0; j < PMD_RXQ_INTERVAL_MAX; j++) {
+ pmd_rxq_idle += dp_netdev_rxq_get_intrvl_cycles(
+ list[i].rxq, PMD_CYCLES_IDLE, j);
+ rxq_intrvl_sum[i] += dp_netdev_rxq_get_intrvl_cycles(
+ list[i].rxq,
+ PMD_CYCLES_PROCESSING, j);
+ }
+ /* Sum processing cycles of each rxq. */
+ pmd_rxq_proc += rxq_intrvl_sum[i];
+ }
+
for (i = 0; i < n; i++) {
const char *name = netdev_rxq_get_name(list[i].rxq->rx);
- if (!prev_name || strcmp(name, prev_name)) {
- if (prev_name) {
- ds_put_cstr(reply, "\n");
- }
- ds_put_format(reply, "\tport: %s\tqueue-id:", name);
- }
- ds_put_format(reply, " %d",
+ ds_put_format(reply, "\tport: %16s\tqueue-id: %2d", name,
netdev_rxq_get_queue_id(list[i].rxq->rx));
- prev_name = name;
+ ds_put_format(reply, "\tpmd usage: ");
+ if (pmd_rxq_proc || pmd_rxq_idle) {
+ ds_put_format(reply, "%2"PRIu64"",
+ rxq_intrvl_sum[i] * 100 /
+ (pmd_rxq_proc + pmd_rxq_idle));
+ ds_put_cstr(reply, " %");
+ } else {
+ ds_put_format(reply, "%s", "NOT AVAIL");
+ }
+ ds_put_cstr(reply, "\n");
}
ovs_mutex_unlock(&pmd->port_mutex);
- ds_put_cstr(reply, "\n");
+ free(rxq_intrvl_sum);
free(list);
}
@@ -7,5 +7,13 @@ m4_divert_push([PREPARE_TESTS])
# port_name rxq_id numa_id core_id
parse_pmd_rxq_show () {
- awk '/pmd/ {numa=$4; core=substr($6, 1, length($6) - 1)} /\t/{for (i=4; i<=NF; i++) print $2, $i, numa, core}' | sort
+ awk '/pmd thread/ {numa=$4; core=substr($6, 1, length($6) - 1)} /\tport:/ {print $2, $4, numa, core}' | sort
+}
+
+# Given the output of `ovs-appctl dpif-netdev/pmd-rxq-show`,
+# and with queues for each core on one line, prints the rxqs
+# of the core on one line
+# 'port:' port_name 'queue-id:' rxq_id rxq_id rxq_id rxq_id
+parse_pmd_rxq_show_group () {
+ awk '/port:/ {print $1, $2, $3, $4, $12, $20, $28}'
}
@@ -54,5 +62,5 @@ m4_define([CHECK_PMD_THREADS_CREATED], [
m4_define([SED_NUMA_CORE_PATTERN], ["s/\(numa_id \)[[0-9]]*\( core_id \)[[0-9]]*:/\1<cleared>\2<cleared>:/"])
-m4_define([SED_NUMA_CORE_QUEUE_PATTERN], ["s/\(numa_id \)[[0-9]]*\( core_id \)[[0-9]]*:/\1<cleared>\2<cleared>:/;s/\(queue-id: \)1 2 5 6/\1<cleared>/;s/\(queue-id: \)0 3 4 7/\1<cleared>/"])
+m4_define([SED_NUMA_CORE_QUEUE_PATTERN], ["s/1 2 5 6/<group>/;s/0 3 4 7/<group>/"])
m4_define([DUMMY_NUMA], [--dummy-numa="0,0,0,0"])
@@ -66,5 +74,5 @@ AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_PATTERN], [0],
pmd thread numa_id <cleared> core_id <cleared>:
isolated : false
- port: p0 queue-id: 0
+ port: p0 queue-id: 0 pmd usage: NOT AVAIL
])
@@ -97,5 +105,12 @@ AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_PATTERN], [0],
pmd thread numa_id <cleared> core_id <cleared>:
isolated : false
- port: p0 queue-id: 0 1 2 3 4 5 6 7
+ port: p0 queue-id: 0 pmd usage: NOT AVAIL
+ port: p0 queue-id: 1 pmd usage: NOT AVAIL
+ port: p0 queue-id: 2 pmd usage: NOT AVAIL
+ port: p0 queue-id: 3 pmd usage: NOT AVAIL
+ port: p0 queue-id: 4 pmd usage: NOT AVAIL
+ port: p0 queue-id: 5 pmd usage: NOT AVAIL
+ port: p0 queue-id: 6 pmd usage: NOT AVAIL
+ port: p0 queue-id: 7 pmd usage: NOT AVAIL
])
@@ -121,5 +136,12 @@ AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_PATTERN], [0],
pmd thread numa_id <cleared> core_id <cleared>:
isolated : false
- port: p0 queue-id: 0 1 2 3 4 5 6 7
+ port: p0 queue-id: 0 pmd usage: NOT AVAIL
+ port: p0 queue-id: 1 pmd usage: NOT AVAIL
+ port: p0 queue-id: 2 pmd usage: NOT AVAIL
+ port: p0 queue-id: 3 pmd usage: NOT AVAIL
+ port: p0 queue-id: 4 pmd usage: NOT AVAIL
+ port: p0 queue-id: 5 pmd usage: NOT AVAIL
+ port: p0 queue-id: 6 pmd usage: NOT AVAIL
+ port: p0 queue-id: 7 pmd usage: NOT AVAIL
])
@@ -128,11 +150,7 @@ AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x3])
CHECK_PMD_THREADS_CREATED([2], [], [+$TMP])
-AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_QUEUE_PATTERN], [0], [dnl
-pmd thread numa_id <cleared> core_id <cleared>:
- isolated : false
- port: p0 queue-id: <cleared>
-pmd thread numa_id <cleared> core_id <cleared>:
- isolated : false
- port: p0 queue-id: <cleared>
+AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed ':a;/AVAIL$/{N;s/\n//;ba}' | parse_pmd_rxq_show_group | sed SED_NUMA_CORE_QUEUE_PATTERN], [0], [dnl
+port: p0 queue-id: <group>
+port: p0 queue-id: <group>
])
@@ -144,5 +162,12 @@ AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | sed SED_NUMA_CORE_PATTERN], [0],
pmd thread numa_id <cleared> core_id <cleared>:
isolated : false
- port: p0 queue-id: 0 1 2 3 4 5 6 7
+ port: p0 queue-id: 0 pmd usage: NOT AVAIL
+ port: p0 queue-id: 1 pmd usage: NOT AVAIL
+ port: p0 queue-id: 2 pmd usage: NOT AVAIL
+ port: p0 queue-id: 3 pmd usage: NOT AVAIL
+ port: p0 queue-id: 4 pmd usage: NOT AVAIL
+ port: p0 queue-id: 5 pmd usage: NOT AVAIL
+ port: p0 queue-id: 6 pmd usage: NOT AVAIL
+ port: p0 queue-id: 7 pmd usage: NOT AVAIL
])
It is based on the length of history that is stored about an rxq (currently 1 min). $ ovs-appctl dpif-netdev/pmd-rxq-show pmd thread numa_id 0 core_id 4: isolated : false port: dpdkphy1 queue-id: 0 pmd usage: 70 % port: dpdkvhost0 queue-id: 0 pmd usage: 0 % pmd thread numa_id 0 core_id 6: isolated : false port: dpdkphy0 queue-id: 0 pmd usage: 64 % port: dpdkvhost1 queue-id: 0 pmd usage: 0 % These values are what would be used as part of rxq to pmd assignment due to a reconfiguration event e.g. adding pmds, adding rxqs or with the command: ovs-appctl dpif-netdev/pmd-rxq-rebalance Signed-off-by: Kevin Traynor <ktraynor@redhat.com> --- Documentation/howto/dpdk.rst | 12 +++++++++++ NEWS | 1 + lib/dpif-netdev.c | 41 ++++++++++++++++++++++++++--------- tests/pmd.at | 51 +++++++++++++++++++++++++++++++++----------- 4 files changed, 82 insertions(+), 23 deletions(-)