diff mbox

[ovs-dev,5/8] dpif-netdev: Count the rxq processing cycles for an rxq.

Message ID 1498653773-13757-6-git-send-email-ktraynor@redhat.com
State Superseded
Headers show

Commit Message

Kevin Traynor June 28, 2017, 12:42 p.m. UTC
Count the cycles used for processing an rxq during the pmd
optimization interval. As this is an in-flight counter and
pmds run independently, also store the total cycles used
during the last full interval.

Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
---
 lib/dpif-netdev.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
diff mbox

Patch

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index b420aef..6ccad13 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -675,5 +675,6 @@  static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd)
     OVS_REQUIRES(pmd->port_mutex);
 static inline void
-dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd);
+dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
+                           struct polled_queue *poll_list, int poll_cnt);
 static void
 dp_netdev_rxq_set_cycles(struct dp_netdev_rxq *rx,
@@ -3174,5 +3175,5 @@  port_reconfigure(struct dp_netdev_port *port)
 {
     struct netdev *netdev = port->netdev;
-    int i, err;
+    int i, err, last_nrxq;
 
     port->need_reconfigure = false;
@@ -3183,4 +3184,5 @@  port_reconfigure(struct dp_netdev_port *port)
         port->rxqs[i].rx = NULL;
     }
+    last_nrxq = port->n_rxq;
     port->n_rxq = 0;
 
@@ -3203,4 +3205,9 @@  port_reconfigure(struct dp_netdev_port *port)
     for (i = 0; i < netdev_n_rxq(netdev); i++) {
         port->rxqs[i].port = port;
+        if (i >= last_nrxq) {
+            /* Only reset cycle stats for new queues */
+            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_CURR, 0);
+            dp_netdev_rxq_set_cycles(&port->rxqs[i], RXQ_CYCLES_PROC_LAST, 0);
+        }
         err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i);
         if (err) {
@@ -3796,5 +3803,5 @@  reload:
                 dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx,
                                            poll_list[i].port_no);
-            cycles_count_intermediate(pmd, NULL,
+            cycles_count_intermediate(pmd, poll_list[i].rxq,
                                       process_packets ? PMD_CYCLES_PROCESSING
                                                       : PMD_CYCLES_IDLE);
@@ -3807,5 +3814,5 @@  reload:
 
             coverage_try_clear();
-            dp_netdev_pmd_try_optimize(pmd);
+            dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt);
             if (!ovsrcu_try_quiesce()) {
                 emc_cache_slow_sweep(&pmd->flow_cache);
@@ -5701,8 +5708,11 @@  dpcls_sort_subtable_vector(struct dpcls *cls)
 
 static inline void
-dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
+dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd,
+                           struct polled_queue *poll_list, int poll_cnt)
 {
     struct dpcls *cls;
     long long int now = time_msec();
+    int i;
+    uint64_t rxq_cyc_curr;
 
     if (now > pmd->next_optimization) {
@@ -5716,4 +5726,14 @@  dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd)
             ovs_mutex_unlock(&pmd->flow_mutex);
         }
+
+        /* Get the cycles that were used to process each queue and store. */
+        for (i = 0; i < poll_cnt; i++) {
+            rxq_cyc_curr = dp_netdev_rxq_get_cycles(poll_list[i].rxq,
+                                                    RXQ_CYCLES_PROC_CURR);
+            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_LAST,
+                                     rxq_cyc_curr);
+            dp_netdev_rxq_set_cycles(poll_list[i].rxq, RXQ_CYCLES_PROC_CURR,
+                                     0);
+        }
         /* Start new measuring interval */
         pmd->next_optimization = now + PMD_OPTIMIZATION_INTERVAL;