
[ovs-dev,v2,2/2] dpif-netdev: Calculate rxq cycles prior to rxq_cycle_sort calls.

Message ID 1510075901-32305-2-git-send-email-ktraynor@redhat.com
State Superseded
Series [ovs-dev,v2,1/2] dpif-netdev: Rename rxq_interval.

Commit Message

Kevin Traynor Nov. 7, 2017, 5:31 p.m. UTC
rxq_cycle_sort summed the latest cycles from each queue for sorting.
While each comparison was correct with the latest cycles, the cycles
could change between calls to rxq_cycle_sort. In order to use
consistent values through each call to rxq_cycle_sort, sum the cycles
prior to rxq_cycle_sort being called.

Also, change return to 0 when values are equal.

Requested-by: Ilya Maximets <i.maximets@samsung.com>
Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
---
v2: No change, but was previously 3/3.

 lib/dpif-netdev.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
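
For readers less familiar with qsort comparator semantics, here is a minimal, self-contained sketch of the idea described in the commit message (simplified stand-in names, not the actual dpif-netdev code): the cycle history is summed once per queue before the sort, and the comparator only reads the cached totals, returning -1, 0 or 1.

    /* Standalone illustration of the approach in this patch: sum each
     * queue's interval cycles once, cache the total, and let the qsort
     * comparator read only the cached totals so that every comparison in
     * a single sort sees the same values.  All names are simplified
     * stand-ins for the dpif-netdev structures. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define INTERVAL_MAX 6              /* stand-in for PMD_RXQ_INTERVAL_MAX */

    struct rxq {
        int id;
        uint64_t intrvl[INTERVAL_MAX];  /* per-interval processing cycles */
        uint64_t cycles_hist;           /* cached sum, filled before sorting */
    };

    /* qsort comparator: descending by cached cycle history, with an
     * explicit 0 when the totals are equal. */
    static int
    rxq_cycle_cmp(const void *a, const void *b)
    {
        const struct rxq *qa = *(const struct rxq *const *) a;
        const struct rxq *qb = *(const struct rxq *const *) b;

        if (qa->cycles_hist == qb->cycles_hist) {
            return 0;
        }
        return qa->cycles_hist > qb->cycles_hist ? -1 : 1;
    }

    int
    main(void)
    {
        struct rxq q[3] = {
            { .id = 0, .intrvl = { 10, 20, 30 } },
            { .id = 1, .intrvl = { 50, 5, 10 } },
            { .id = 2, .intrvl = { 1, 1, 1 } },
        };
        struct rxq *sorted[3];

        /* Sum the interval cycles once, before sorting, mirroring what the
         * patch does in rxq_scheduling(). */
        for (int i = 0; i < 3; i++) {
            uint64_t total = 0;
            for (int j = 0; j < INTERVAL_MAX; j++) {
                total += q[i].intrvl[j];
            }
            q[i].cycles_hist = total;
            sorted[i] = &q[i];
        }

        qsort(sorted, 3, sizeof sorted[0], rxq_cycle_cmp);

        /* Prints the queues busiest-first: rxq 1 (65), rxq 0 (60), rxq 2 (3). */
        for (int i = 0; i < 3; i++) {
            printf("rxq %d: %llu cycles\n", sorted[i]->id,
                   (unsigned long long) sorted[i]->cycles_hist);
        }
        return 0;
    }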

Comments

Stokes, Ian Nov. 13, 2017, 9:42 p.m. UTC | #1
> rxq_cycle_sort summed the latest cycles from each queue for sorting.
> While each comparison was correct with the latest cycles, the cycles could
> change between calls to rxq_cycle_sort. In order to use consistent values
> through each call to rxq_cycle_sort, sum the cycles prior to
> rxq_cycle_sort being called.
> 
> Also, change return to 0 when values are equal.
> 

Thanks for this, Kevin. I'll validate this patch with a view to adding it to the DPDK merge branch. Minor comments below.

> Requested-by: Ilya Maximets <i.maximets@samsung.com>

@Ilya: do you have any comments? If not, I'll look to push this within the next day after testing.

> Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
> ---
> v2: No change, but was previously 3/3.
> 
>  lib/dpif-netdev.c | 22 +++++++++++++---------
>  1 file changed, 13 insertions(+), 9 deletions(-)
> 
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 66c14f9..d9e2912
> 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -3458,18 +3458,14 @@ rxq_cycle_sort(const void *a, const void *b)
>      struct dp_netdev_rxq *qb;
>      uint64_t total_qa, total_qb;
> -    unsigned i;
> 
>      qa = *(struct dp_netdev_rxq **) a;
>      qb = *(struct dp_netdev_rxq **) b;
> 
> -    total_qa = total_qb = 0;
> -    for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
> -        total_qa += dp_netdev_rxq_get_intrvl_cycles(qa, i);
> -        total_qb += dp_netdev_rxq_get_intrvl_cycles(qb, i);
> -    }
> -    dp_netdev_rxq_set_cycles(qa, RXQ_CYCLES_PROC_HIST, total_qa);
> -    dp_netdev_rxq_set_cycles(qb, RXQ_CYCLES_PROC_HIST, total_qb);
> +    total_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
> +    total_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);
> 

Minor nit, but I think the comment for this function should be extended now to give more detail of what's happening and context on the possible return values (there are 3 possible return values now). Maybe the behavior is detailed elsewhere in the code where rxq_cycle_sort is called, but I didn't spot it.

So I'm thinking, for clarity, this could be documented in the comment for the function overall?
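
For illustration only (this exact wording is not part of the patch), such a comment might read:

    /* qsort comparator for 'struct dp_netdev_rxq *' elements.  Orders the
     * queues by their cached processing-cycle history
     * (RXQ_CYCLES_PROC_HIST), most loaded first.  Returns:
     *   -1 if 'a' used more cycles than 'b',
     *    0 if they used the same number of cycles,
     *    1 otherwise (per the unchanged tail of the function). */
    static int
    rxq_cycle_sort(const void *a, const void *b);
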
> -    if (total_qa >= total_qb) {
> +    if (total_qa == total_qb) {
> +        return 0;
> +    } else if (total_qa > total_qb) {
>          return -1;
>      }
> @@ -3519,4 +3515,6 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned)
> OVS_REQUIRES(dp->port_mutex)
>                  }
>              } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
> +                uint64_t cycle_hist = 0;
> +
>                  if (n_rxqs == 0) {
>                      rxqs = xmalloc(sizeof *rxqs); @@ -3524,4 +3522,10 @@
> rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp-
> >port_mutex)
>                      rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1));
>                  }
> +
> +                for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
> +                    cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i);
> +                }
> +                dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST,
> + cycle_hist);
> +
>                  /* Store the queue. */
>                  rxqs[n_rxqs++] = q;
> --
> 1.8.3.1
> 
> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
Kevin Traynor Nov. 14, 2017, 4:57 a.m. UTC | #2
On 11/13/2017 09:42 PM, Stokes, Ian wrote:
>> rxq_cycle_sort summed the latest cycles from each queue for sorting.
>> While each comparison was correct with the latest cycles, the cycles could
>> change between calls to rxq_cycle_sort. In order to use consistent values
>> through each call to rxq_cycle_sort, sum the cycles prior to
>> rxq_cycle_sort being called.
>>
>> Also, change return to 0 when values are equal.
>>
> 
> Thanks for this, Kevin. I'll validate this patch with a view to adding it to the DPDK merge branch. Minor comments below.

Thanks for reviewing. Actually, hold off on applying this as, based on
feedback from Alin, I may need to change the sort a bit to give a
consistent result for the unit test on Windows.
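
The C standard leaves the relative order of elements that compare equal unspecified for qsort(), so equal cycle totals can come out in a different order on different platforms. One way to pin the order down would be a tie-breaker in the comparator -- purely a sketch of one option, with a hypothetical rxq_idx() helper standing in for any stable per-queue key, and not necessarily what a later revision will do:

    if (total_qa == total_qb) {
        /* Equal cycle totals: fall back to a stable per-queue key so the
         * result does not depend on the platform's qsort() behaviour.
         * rxq_idx() is a hypothetical stand-in for such a key. */
        if (rxq_idx(qa) == rxq_idx(qb)) {
            return 0;
        }
        return rxq_idx(qa) < rxq_idx(qb) ? -1 : 1;
    } else if (total_qa > total_qb) {
        return -1;
    }
    return 1;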

> 
>> Requested-by: Ilya Maximets <i.maximets@samsung.com>
> 
> @Ilya: do you have any comments? If not, I'll look to push this within the next day after testing.
> 
>> Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
>> ---
>> v2: No change, but was previously 3/3.
>>
>>  lib/dpif-netdev.c | 22 +++++++++++++---------
>>  1 file changed, 13 insertions(+), 9 deletions(-)
>>
>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 66c14f9..d9e2912
>> 100644
>> --- a/lib/dpif-netdev.c
>> +++ b/lib/dpif-netdev.c
>> @@ -3458,18 +3458,14 @@ rxq_cycle_sort(const void *a, const void *b)
>>      struct dp_netdev_rxq *qb;
>>      uint64_t total_qa, total_qb;
>> -    unsigned i;
>>
>>      qa = *(struct dp_netdev_rxq **) a;
>>      qb = *(struct dp_netdev_rxq **) b;
>>
>> -    total_qa = total_qb = 0;
>> -    for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
>> -        total_qa += dp_netdev_rxq_get_intrvl_cycles(qa, i);
>> -        total_qb += dp_netdev_rxq_get_intrvl_cycles(qb, i);
>> -    }
>> -    dp_netdev_rxq_set_cycles(qa, RXQ_CYCLES_PROC_HIST, total_qa);
>> -    dp_netdev_rxq_set_cycles(qb, RXQ_CYCLES_PROC_HIST, total_qb);
>> +    total_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
>> +    total_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);
>>
> 
> Minor nit, but I think the comment for this function should be extended now to give more detail of what's happening and context on the possible return values (there are 3 possible return values now). Maybe the behavior is detailed elsewhere in the code where rxq_cycle_sort is called, but I didn't spot it.
> 

Sure, I can add some additional comments.

> So I'm thinking, for clarity, this could be documented in the comment for the function overall?
>> -    if (total_qa >= total_qb) {
>> +    if (total_qa == total_qb) {
>> +        return 0;
>> +    } else if (total_qa > total_qb) {
>>          return -1;
>>      }
>> @@ -3519,4 +3515,6 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned)
>> OVS_REQUIRES(dp->port_mutex)
>>                  }
>>              } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
>> +                uint64_t cycle_hist = 0;
>> +
>>                  if (n_rxqs == 0) {
>>                      rxqs = xmalloc(sizeof *rxqs); @@ -3524,4 +3522,10 @@
>> rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp-
>>> port_mutex)
>>                      rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1));
>>                  }
>> +
>> +                for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
>> +                    cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i);
>> +                }
>> +                dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST,
>> + cycle_hist);
>> +
>>                  /* Store the queue. */
>>                  rxqs[n_rxqs++] = q;
>> --
>> 1.8.3.1
>>
>> _______________________________________________
>> dev mailing list
>> dev@openvswitch.org
>> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
Kevin Traynor Nov. 23, 2017, 10:07 a.m. UTC | #3
On 11/13/2017 09:42 PM, Stokes, Ian wrote:
>> rxq_cycle_sort summed the latest cycles from each queue for sorting.
>> While each comparison was correct with the latest cycles, the cycles could
>> change between calls to rxq_cycle_sort. In order to use consistent values
>> through each call to rxq_cycle_sort, sum the cycles prior to
>> rxq_cycle_sort being called.
>>
>> Also, change return to 0 when values are equal.
>>
> 
> Thanks for this, Kevin. I'll validate this patch with a view to adding it to the DPDK merge branch. Minor comments below.
> 
>> Requested-by: Ilya Maximets <i.maximets@samsung.com>
> 
> @Ilya: do you have any comments? If not, I'll look to push this within the next day after testing.
> 
>> Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
>> ---
>> v2: No change, but was previously 3/3.
>>
>>  lib/dpif-netdev.c | 22 +++++++++++++---------
>>  1 file changed, 13 insertions(+), 9 deletions(-)
>>
>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 66c14f9..d9e2912
>> 100644
>> --- a/lib/dpif-netdev.c
>> +++ b/lib/dpif-netdev.c
>> @@ -3458,18 +3458,14 @@ rxq_cycle_sort(const void *a, const void *b)
>>      struct dp_netdev_rxq *qb;
>>      uint64_t total_qa, total_qb;
>> -    unsigned i;
>>
>>      qa = *(struct dp_netdev_rxq **) a;
>>      qb = *(struct dp_netdev_rxq **) b;
>>
>> -    total_qa = total_qb = 0;
>> -    for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
>> -        total_qa += dp_netdev_rxq_get_intrvl_cycles(qa, i);
>> -        total_qb += dp_netdev_rxq_get_intrvl_cycles(qb, i);
>> -    }
>> -    dp_netdev_rxq_set_cycles(qa, RXQ_CYCLES_PROC_HIST, total_qa);
>> -    dp_netdev_rxq_set_cycles(qb, RXQ_CYCLES_PROC_HIST, total_qb);
>> +    total_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
>> +    total_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);
>>
> 
> Minor nit, but I think the comment for this function should be extended now to give more detail of what's happening and context on the possible return values (there are 3 possible return values now). Maybe the behavior is detailed elsewhere in the code where rxq_cycle_sort is called, but I didn't spot it.
> 

Just to follow up - I've taken any changes around the return value out
of the patch and sent v3. When Alin and I figure out the best approach
for Windows, I will submit a separate patch to update the return value
and update the comments then.

> So I'm thinking, for clarity, this could be documented in the comment for the function overall?
>> -    if (total_qa >= total_qb) {
>> +    if (total_qa == total_qb) {
>> +        return 0;
>> +    } else if (total_qa > total_qb) {
>>          return -1;
>>      }
>> @@ -3519,4 +3515,6 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned)
>> OVS_REQUIRES(dp->port_mutex)
>>                  }
>>              } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
>> +                uint64_t cycle_hist = 0;
>> +
>>                  if (n_rxqs == 0) {
>>                      rxqs = xmalloc(sizeof *rxqs); @@ -3524,4 +3522,10 @@
>> rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp-
>>> port_mutex)
>>                      rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1));
>>                  }
>> +
>> +                for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
>> +                    cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i);
>> +                }
>> +                dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST,
>> + cycle_hist);
>> +
>>                  /* Store the queue. */
>>                  rxqs[n_rxqs++] = q;
>> --
>> 1.8.3.1
>>
>> _______________________________________________
>> dev mailing list
>> dev@openvswitch.org
>> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
Stokes, Ian Nov. 23, 2017, 1:25 p.m. UTC | #4
> On 11/13/2017 09:42 PM, Stokes, Ian wrote:
> >> rxq_cycle_sort summed the latest cycles from each queue for sorting.
> >> While each comparison was correct with the latest cycles, the cycles
> >> could change between calls to rxq_cycle_sort. In order to use
> >> consistent values through each call to rxq_cycle_sort, sum the cycles
> >> prior to rxq_cycle_sort being called.
> >>
> >> Also, change return to 0 when values are equal.
> >>
> >
> > Thanks for this, Kevin. I'll validate this patch with a view to adding it
> > to the DPDK merge branch. Minor comments below.
> >
> >> Requested-by: Ilya Maximets <i.maximets@samsung.com>
> >
> > @Ilya: do you have any comments? If not, I'll look to push this within
> > the next day after testing.
> >
> >> Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
> >> ---
> >> v2: No change, but was previously 3/3.
> >>
> >>  lib/dpif-netdev.c | 22 +++++++++++++---------
> >>  1 file changed, 13 insertions(+), 9 deletions(-)
> >>
> >> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index
> >> 66c14f9..d9e2912
> >> 100644
> >> --- a/lib/dpif-netdev.c
> >> +++ b/lib/dpif-netdev.c
> >> @@ -3458,18 +3458,14 @@ rxq_cycle_sort(const void *a, const void *b)
> >>      struct dp_netdev_rxq *qb;
> >>      uint64_t total_qa, total_qb;
> >> -    unsigned i;
> >>
> >>      qa = *(struct dp_netdev_rxq **) a;
> >>      qb = *(struct dp_netdev_rxq **) b;
> >>
> >> -    total_qa = total_qb = 0;
> >> -    for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
> >> -        total_qa += dp_netdev_rxq_get_intrvl_cycles(qa, i);
> >> -        total_qb += dp_netdev_rxq_get_intrvl_cycles(qb, i);
> >> -    }
> >> -    dp_netdev_rxq_set_cycles(qa, RXQ_CYCLES_PROC_HIST, total_qa);
> >> -    dp_netdev_rxq_set_cycles(qb, RXQ_CYCLES_PROC_HIST, total_qb);
> >> +    total_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
> >> +    total_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);
> >>
> >
> > Minor nit, but I think the comment for this function should be extended
> > now to give more detail of what's happening and context on the possible
> > return values (there are 3 possible return values now). Maybe the behavior
> > is detailed elsewhere in the code where rxq_cycle_sort is called, but I
> > didn't spot it.
> >
> 
> Just to follow up - I've taken any changes around the return value out of
> the patch and sent v3. When Alin and I figure out the best approach for
> Windows, I will submit a separate patch to update the return value and
> update the comments then.
> 

Sounds good. I'm starting to look at the v3 rxq patch now.

Thanks
Ian

> > So I'm thinking, for clarity, this could be documented in the comment for
> > the function overall?
> >> -    if (total_qa >= total_qb) {
> >> +    if (total_qa == total_qb) {
> >> +        return 0;
> >> +    } else if (total_qa > total_qb) {
> >>          return -1;
> >>      }
> >> @@ -3519,4 +3515,6 @@ rxq_scheduling(struct dp_netdev *dp, bool
> >> pinned)
> >> OVS_REQUIRES(dp->port_mutex)
> >>                  }
> >>              } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
> >> +                uint64_t cycle_hist = 0;
> >> +
> >>                  if (n_rxqs == 0) {
> >>                      rxqs = xmalloc(sizeof *rxqs); @@ -3524,4
> >> +3522,10 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned)
> >> OVS_REQUIRES(dp-
> >>> port_mutex)
> >>                      rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs +
> 1));
> >>                  }
> >> +
> >> +                for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
> >> +                    cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q,
> i);
> >> +                }
> >> +                dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST,
> >> + cycle_hist);
> >> +
> >>                  /* Store the queue. */
> >>                  rxqs[n_rxqs++] = q;
> >> --
> >> 1.8.3.1
> >>
> >> _______________________________________________
> >> dev mailing list
> >> dev@openvswitch.org
> >> https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Patch

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 66c14f9..d9e2912 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -3458,18 +3458,14 @@  rxq_cycle_sort(const void *a, const void *b)
     struct dp_netdev_rxq *qb;
     uint64_t total_qa, total_qb;
-    unsigned i;
 
     qa = *(struct dp_netdev_rxq **) a;
     qb = *(struct dp_netdev_rxq **) b;
 
-    total_qa = total_qb = 0;
-    for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
-        total_qa += dp_netdev_rxq_get_intrvl_cycles(qa, i);
-        total_qb += dp_netdev_rxq_get_intrvl_cycles(qb, i);
-    }
-    dp_netdev_rxq_set_cycles(qa, RXQ_CYCLES_PROC_HIST, total_qa);
-    dp_netdev_rxq_set_cycles(qb, RXQ_CYCLES_PROC_HIST, total_qb);
+    total_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
+    total_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);
 
-    if (total_qa >= total_qb) {
+    if (total_qa == total_qb) {
+        return 0;
+    } else if (total_qa > total_qb) {
         return -1;
     }
@@ -3519,4 +3515,6 @@  rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
                 }
             } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
+                uint64_t cycle_hist = 0;
+
                 if (n_rxqs == 0) {
                     rxqs = xmalloc(sizeof *rxqs);
@@ -3524,4 +3522,10 @@  rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
                     rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1));
                 }
+
+                for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
+                    cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i);
+                }
+                dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST, cycle_hist);
+
                 /* Store the queue. */
                 rxqs[n_rxqs++] = q;
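
For context, the call site is not shown in these hunks, but a comparator with this contract is presumably consumed along the lines of:

    qsort(rxqs, n_rxqs, sizeof *rxqs, rxq_cycle_sort);

which leaves rxqs[] ordered from the queue with the largest pre-computed RXQ_CYCLES_PROC_HIST value down to the smallest, with equal totals now reported as 0 rather than folded into the old ">=" branch.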