@@ -327,6 +327,7 @@ enum dp_stat_type {
enum pmd_cycles_counter_type {
- PMD_CYCLES_POLLING, /* Cycles spent polling NICs. */
- PMD_CYCLES_PROCESSING, /* Cycles spent processing packets */
+ PMD_CYCLES_IDLE, /* Cycles spent idle or unsuccessful polling */
+ PMD_CYCLES_PROCESSING, /* Cycles spent successfully polling and
+ * processing polled packets */
PMD_N_CYCLES
};
@@ -805,8 +806,8 @@ pmd_info_show_stats(struct ds *reply,
ds_put_format(reply,
- "\tpolling cycles:%"PRIu64" (%.02f%%)\n"
+ "\tidle cycles:%"PRIu64" (%.02f%%)\n"
"\tprocessing cycles:%"PRIu64" (%.02f%%)\n",
- cycles[PMD_CYCLES_POLLING],
- cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100,
+ cycles[PMD_CYCLES_IDLE],
+ cycles[PMD_CYCLES_IDLE] / (double)total_cycles * 100,
cycles[PMD_CYCLES_PROCESSING],
cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100);
@@ -3080,5 +3081,18 @@ cycles_count_end(struct dp_netdev_pmd_thread *pmd,
}
-static void
+/* Calculate the intermediate cycle result and add to the counter 'type' */
+static inline void
+cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
+ enum pmd_cycles_counter_type type)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ unsigned long long new_cycles = cycles_counter();
+ unsigned long long interval = new_cycles - pmd->last_cycles;
+ pmd->last_cycles = new_cycles;
+
+ non_atomic_ullong_add(&pmd->cycles.n[type], interval);
+}
+
+static int
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
struct netdev_rxq *rx,
@@ -3087,15 +3101,13 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
struct dp_packet_batch batch;
int error;
+ int batch_cnt = 0;
dp_packet_batch_init(&batch);
- cycles_count_start(pmd);
error = netdev_rxq_recv(rx, &batch);
- cycles_count_end(pmd, PMD_CYCLES_POLLING);
if (!error) {
*recirc_depth_get() = 0;
- cycles_count_start(pmd);
+ batch_cnt = batch.count;
dp_netdev_input(pmd, &batch, port_no);
- cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
} else if (error != EAGAIN && error != EOPNOTSUPP) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
@@ -3104,4 +3116,6 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
netdev_rxq_get_name(rx), ovs_strerror(error));
}
+
+ return batch_cnt;
}
@@ -3566,4 +3580,5 @@ dpif_netdev_run(struct dpif *dpif)
struct dp_netdev_pmd_thread *non_pmd;
uint64_t new_tnl_seq;
+ int process_packets = 0;
ovs_mutex_lock(&dp->port_mutex);
@@ -3571,4 +3586,5 @@ dpif_netdev_run(struct dpif *dpif)
if (non_pmd) {
ovs_mutex_lock(&dp->non_pmd_mutex);
+ cycles_count_start(non_pmd);
HMAP_FOR_EACH (port, node, &dp->ports) {
if (!netdev_is_pmd(port->netdev)) {
@@ -3576,9 +3592,15 @@ dpif_netdev_run(struct dpif *dpif)
for (i = 0; i < port->n_rxq; i++) {
- dp_netdev_process_rxq_port(non_pmd, port->rxqs[i].rx,
- port->port_no);
+ process_packets =
+ dp_netdev_process_rxq_port(non_pmd,
+ port->rxqs[i].rx,
+ port->port_no);
+ cycles_count_intermediate(non_pmd, process_packets ?
+ PMD_CYCLES_PROCESSING
+ : PMD_CYCLES_IDLE);
}
}
}
+ cycles_count_end(non_pmd, PMD_CYCLES_IDLE);
dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false);
ovs_mutex_unlock(&dp->non_pmd_mutex);
@@ -3705,4 +3727,5 @@ pmd_thread_main(void *f_)
int poll_cnt;
int i;
+ int process_packets = 0;
poll_list = NULL;
@@ -3731,8 +3754,13 @@ reload:
}
+ cycles_count_start(pmd);
for (;;) {
for (i = 0; i < poll_cnt; i++) {
- dp_netdev_process_rxq_port(pmd, poll_list[i].rx,
- poll_list[i].port_no);
+ process_packets =
+ dp_netdev_process_rxq_port(pmd, poll_list[i].rx,
+ poll_list[i].port_no);
+ cycles_count_intermediate(pmd,
+ process_packets ? PMD_CYCLES_PROCESSING
+ : PMD_CYCLES_IDLE);
}
@@ -3755,4 +3783,6 @@ reload:
}
+ cycles_count_end(pmd, PMD_CYCLES_IDLE);
+
poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
exiting = latch_is_set(&pmd->exit_latch);
@@ -270,5 +270,8 @@ packets received by the datapath. Cycles are counted using the TSC or similar
facilities (when available on the platform). To reset these counters use
\fBdpif-netdev/pmd-stats-clear\fR. The duration of one cycle depends on the
-measuring infrastructure.
+measuring infrastructure. ``idle cycles'' refers to cycles spent polling
+devices but not receiving any packets. ``processing cycles'' refers to cycles
+spent polling devices and successfully receiving packets, plus the cycles
+spent processing said packets.
.IP "\fBdpif-netdev/pmd-stats-clear\fR [\fIdp\fR]"
Resets to zero the per pmd thread performance numbers shown by the