Message ID | 1510075901-32305-1-git-send-email-ktraynor@redhat.com |
---|---|
State | Accepted |
Headers | show |
Series | [ovs-dev,v2,1/2] dpif-netdev: Rename rxq_interval. | expand |
> rxq_interval was added before there were other #defines and code related to > rxq intervals. > > Rename to rxq_next_cycle_store in order to make it more intuitive. > > Requested-by: Ilya Maximets <i.maximets@samsung.com> > Signed-off-by: Kevin Traynor <ktraynor@redhat.com> > Acked-by: Antonio Fischetti <antonio.fischetti@intel.com> Thanks for this Kevin, this is a trivial patch so I can roll it into the dpdk_merge branch for validation, should be part of the next pull request. Ian > --- > v2: Rebase. > > lib/dpif-netdev.c | 8 ++++---- > 1 file changed, 4 insertions(+), 4 deletions(-) > > diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 599308d..66c14f9 > 100644 > --- a/lib/dpif-netdev.c > +++ b/lib/dpif-netdev.c > @@ -590,5 +590,5 @@ struct dp_netdev_pmd_thread { > /* End of the next time interval for which processing cycles > are stored for each polled rxq. */ > - long long int rxq_interval; > + long long int rxq_next_cycle_store; > > /* Cycles counters */ > @@ -4544,5 +4544,5 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread > *pmd, struct dp_netdev *dp, > cmap_init(&pmd->classifiers); > pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL; > - pmd->rxq_interval = time_msec() + PMD_RXQ_INTERVAL_LEN; > + pmd->rxq_next_cycle_store = time_msec() + PMD_RXQ_INTERVAL_LEN; > hmap_init(&pmd->poll_list); > hmap_init(&pmd->tx_ports); > @@ -6011,5 +6011,5 @@ dp_netdev_pmd_try_optimize(struct > dp_netdev_pmd_thread *pmd, > long long int now = time_msec(); > > - if (now > pmd->rxq_interval) { > + if (now > pmd->rxq_next_cycle_store) { > /* Get the cycles that were used to process each queue and store. 
> */ > for (unsigned i = 0; i < poll_cnt; i++) { @@ -6021,5 +6021,5 @@ > dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, > } > /* Start new measuring interval */ > - pmd->rxq_interval = now + PMD_RXQ_INTERVAL_LEN; > + pmd->rxq_next_cycle_store = now + PMD_RXQ_INTERVAL_LEN; > } > > -- > 1.8.3.1 > > _______________________________________________ > dev mailing list > dev@openvswitch.org > https://mail.openvswitch.org/mailman/listinfo/ovs-dev
> > rxq_interval was added before there were other #defines and code > > related to rxq intervals. > > > > Rename to rxq_next_cycle_store in order to make it more intuitive. > > > > Requested-by: Ilya Maximets <i.maximets@samsung.com> > > Signed-off-by: Kevin Traynor <ktraynor@redhat.com> > > Acked-by: Antonio Fischetti <antonio.fischetti@intel.com> > > Thanks for this Kevin, this is a trivial patch so I can roll it into the > dpdk_merge branch for validation, should be part of the next pull request. > FYI this is now on the dpdk_merge branch. https://github.com/istokes/ovs/tree/dpdk_merge Thanks Ian > Ian > > --- > > v2: Rebase. > > > > lib/dpif-netdev.c | 8 ++++---- > > 1 file changed, 4 insertions(+), 4 deletions(-) > > > > diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index > > 599308d..66c14f9 > > 100644 > > --- a/lib/dpif-netdev.c > > +++ b/lib/dpif-netdev.c > > @@ -590,5 +590,5 @@ struct dp_netdev_pmd_thread { > > /* End of the next time interval for which processing cycles > > are stored for each polled rxq. */ > > - long long int rxq_interval; > > + long long int rxq_next_cycle_store; > > > > /* Cycles counters */ > > @@ -4544,5 +4544,5 @@ dp_netdev_configure_pmd(struct > > dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, > > cmap_init(&pmd->classifiers); > > pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL; > > - pmd->rxq_interval = time_msec() + PMD_RXQ_INTERVAL_LEN; > > + pmd->rxq_next_cycle_store = time_msec() + PMD_RXQ_INTERVAL_LEN; > > hmap_init(&pmd->poll_list); > > hmap_init(&pmd->tx_ports); > > @@ -6011,5 +6011,5 @@ dp_netdev_pmd_try_optimize(struct > > dp_netdev_pmd_thread *pmd, > > long long int now = time_msec(); > > > > - if (now > pmd->rxq_interval) { > > + if (now > pmd->rxq_next_cycle_store) { > > /* Get the cycles that were used to process each queue and > store. 
> > */ > > for (unsigned i = 0; i < poll_cnt; i++) { @@ -6021,5 +6021,5 > > @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, > > } > > /* Start new measuring interval */ > > - pmd->rxq_interval = now + PMD_RXQ_INTERVAL_LEN; > > + pmd->rxq_next_cycle_store = now + PMD_RXQ_INTERVAL_LEN; > > } > > > > -- > > 1.8.3.1 > > > > _______________________________________________ > > dev mailing list > > dev@openvswitch.org > > https://mail.openvswitch.org/mailman/listinfo/ovs-dev > _______________________________________________ > dev mailing list > dev@openvswitch.org > https://mail.openvswitch.org/mailman/listinfo/ovs-dev
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 599308d..66c14f9 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -590,5 +590,5 @@ struct dp_netdev_pmd_thread { /* End of the next time interval for which processing cycles are stored for each polled rxq. */ - long long int rxq_interval; + long long int rxq_next_cycle_store; /* Cycles counters */ @@ -4544,5 +4544,5 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp, cmap_init(&pmd->classifiers); pmd->next_optimization = time_msec() + DPCLS_OPTIMIZATION_INTERVAL; - pmd->rxq_interval = time_msec() + PMD_RXQ_INTERVAL_LEN; + pmd->rxq_next_cycle_store = time_msec() + PMD_RXQ_INTERVAL_LEN; hmap_init(&pmd->poll_list); hmap_init(&pmd->tx_ports); @@ -6011,5 +6011,5 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, long long int now = time_msec(); - if (now > pmd->rxq_interval) { + if (now > pmd->rxq_next_cycle_store) { /* Get the cycles that were used to process each queue and store. */ for (unsigned i = 0; i < poll_cnt; i++) { @@ -6021,5 +6021,5 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, } /* Start new measuring interval */ - pmd->rxq_interval = now + PMD_RXQ_INTERVAL_LEN; + pmd->rxq_next_cycle_store = now + PMD_RXQ_INTERVAL_LEN; }