
[ovs-dev,4/5] dpif-netdev: Assign PMD for failed pinned rxqs.

Message ID 20210604211856.915563-5-ktraynor@redhat.com
State Superseded, archived
Series Rxq scheduling updates.

Commit Message

Kevin Traynor June 4, 2021, 9:18 p.m. UTC
Previously, if pmd-rxq-affinity was used to pin an rxq to
a core that was not in pmd-cpu-mask, the rxq was not polled
and the user received a warning.

Now that pinned and non-pinned rxqs are assigned to PMDs in
a common call to rxq scheduling, if an invalid core is
selected in pmd-rxq-affinity, the rxq can be assigned to an
available PMD (if any).

A warning will still be logged as the requested core could
not be used.

Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
---
 Documentation/topics/dpdk/pmd.rst |  6 +++---
 lib/dpif-netdev.c                 | 30 ++++++++++++++++++++++++++++--
 tests/pmd.at                      |  5 ++++-
 3 files changed, 35 insertions(+), 6 deletions(-)
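
As a rough illustration of the behaviour described above, here is a
minimal, self-contained C sketch (this is not the actual dpif-netdev
code; the types and helpers are simplified stand-ins):

    #include <stdio.h>

    #define CORE_UNSPEC ((unsigned) -1)

    struct rxq {
        const char *name;
        unsigned requested_core;    /* from pmd-rxq-affinity, or CORE_UNSPEC */
    };

    /* Stand-in for the pmd-cpu-mask check; assume pmds on cores 1 and 2. */
    static int
    core_has_pmd(unsigned core)
    {
        return core == 1 || core == 2;
    }

    /* Stand-in for the common (non-pinned) assignment path. */
    static unsigned
    pick_non_isolated_pmd(void)
    {
        return 1;
    }

    static unsigned
    schedule_rxq(const struct rxq *q)
    {
        if (q->requested_core != CORE_UNSPEC) {
            if (core_has_pmd(q->requested_core)) {
                /* Requested core is available: pin as before. */
                return q->requested_core;
            }
            /* Requested core is not in pmd-cpu-mask: warn, then fall
             * through to the common path instead of leaving the rxq
             * unpolled. */
            fprintf(stderr, "Core %u cannot be pinned with rxq '%s'. "
                    "An alternative core will be assigned.\n",
                    q->requested_core, q->name);
        }
        return pick_non_isolated_pmd();
    }

    int
    main(void)
    {
        struct rxq q = { "vhostuserclient0 rxq 0", 99 };

        printf("assigned to core %u\n", schedule_rxq(&q));
        return 0;
    }

With the old behaviour a queue pinned to an unavailable core would simply
stay unpolled; the sketch instead falls back to a non-isolated PMD, which
matches the warning and assignment seen in the test report below.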

Comments

Pai G, Sunil June 24, 2021, 7:51 a.m. UTC | #1
Hey Kevin,

Patch looks good to me.
Builds fine, all test cases here http://patchwork.ozlabs.org/project/openvswitch/patch/20210316154532.127858-1-ktraynor@redhat.com/ pass as well.


# ovs-appctl dpif-netdev/pmd-rxq-show
pmd thread numa_id 0 core_id 1:
  isolated : false
  port: dpdk0             queue-id:  0 (enabled)   pmd usage:  0 %
pmd thread numa_id 0 core_id 2:
  isolated : false
  port: vhostuserclient0  queue-id:  0 (enabled)   pmd usage:  0 %
  
# $OVS_DIR/utilities/ovs-vsctl set interface vhostuserclient0 other_config:pmd-rxq-affinity="0:99"

# ovs-appctl dpif-netdev/pmd-rxq-show
pmd thread numa_id 0 core_id 1:
  isolated : false
  port: dpdk0             queue-id:  0 (enabled)   pmd usage:  0 %
pmd thread numa_id 0 core_id 2:
  isolated : false
  port: vhostuserclient0  queue-id:  0 (enabled)   pmd usage:  0 %

from vswitchd log:
dpif_netdev|WARN|Core 99 cannot be pinned with port 'vhostuserclient0' rx queue 0. Use pmd-cpu-mask to enable a pmd on core 99. An alternative core will be assigned.
dpif_netdev|INFO|Performing pmd to rx queue assignment using cycles algorithm.
dpif_netdev|INFO|Core  1 on numa node 0 assigned port 'dpdk0' rx queue 0. (measured processing cycles 0).
dpif_netdev|INFO|Core  2 on numa node 0 assigned port 'vhostuserclient0' rx queue 0. (measured processing cycles 0).


<snipped>
David Marchand June 24, 2021, 3:04 p.m. UTC | #2
On Fri, Jun 4, 2021 at 11:19 PM Kevin Traynor <ktraynor@redhat.com> wrote:
>
> Previously, if pmd-rxq-affinity was used to pin an rxq to
> a core that was not in pmd-cpu-mask the rxq was not polled
> and the user received a warning.
>
> Now that pinned and non-pinned rxqs are assigned to PMDs in
> a common call to rxq scheduling, if an invalid core is
> selected in pmd-rxq-affinity the rxq can be assigned an
> available PMD (if any).
>
> A warning will still be logged as the requested core could
> not be used.
>
> Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
> ---
>  Documentation/topics/dpdk/pmd.rst |  6 +++---
>  lib/dpif-netdev.c                 | 30 ++++++++++++++++++++++++++++--
>  tests/pmd.at                      |  5 ++++-
>  3 files changed, 35 insertions(+), 6 deletions(-)
>
> diff --git a/Documentation/topics/dpdk/pmd.rst b/Documentation/topics/dpdk/pmd.rst
> index d1c45cdfb..29ba53954 100644
> --- a/Documentation/topics/dpdk/pmd.rst
> +++ b/Documentation/topics/dpdk/pmd.rst
> @@ -108,7 +108,7 @@ means that this thread will only poll the *pinned* Rx queues.
>
>     If there are no *non-isolated* PMD threads, *non-pinned* RX queues will not
> -   be polled. Also, if the provided ``<core-id>`` is not available (e.g. the
> -   ``<core-id>`` is not in ``pmd-cpu-mask``), the RX queue will not be polled
> -   by any PMD thread.
> +   be polled. If the provided ``<core-id>`` is not available (e.g. the
> +   ``<core-id>`` is not in ``pmd-cpu-mask``), the RX queue will be assigned to
> +   a *non-isolated* PMD, that will remain *non-isolated*.
>
>  If ``pmd-rxq-affinity`` is not set for Rx queues, they will be assigned to PMDs
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 61e0a516f..377573233 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -5027,4 +5027,25 @@ find_sched_pmd_by_pmd(struct sched_numa_list *numa_list,
>  }
>
> +static struct sched_pmd *
> +find_sched_pmd_by_rxq(struct sched_numa_list *numa_list,
> +                      struct dp_netdev_rxq *rxq)
> +{
> +    struct sched_numa *numa;
> +
> +    HMAP_FOR_EACH (numa, node, &numa_list->numas) {
> +        for (unsigned i = 0; i < numa->n_pmds; i++) {
> +            struct sched_pmd *sched_pmd;
> +
> +            sched_pmd = &numa->pmds[i];
> +            for (int k = 0; k < sched_pmd->n_rxq; k++) {
> +                if (sched_pmd->rxqs[k] == rxq) {
> +                    return sched_pmd;
> +                }
> +            }
> +        }
> +    }
> +    return NULL;
> +}
> +
>  static struct sched_numa *
>  sched_numa_list_find_numa(struct sched_numa_list *numa_list,
> @@ -5408,5 +5429,6 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,
>                              "Core %2u cannot be pinned with "
>                              "port \'%s\' rx queue %d. Use pmd-cpu-mask to "
> -                            "enable a pmd on core %u.",
> +                            "enable a pmd on core %u. An alternative core "
> +                            "will be assigned.",
>                              rxq->core_id,
>                              netdev_rxq_get_name(rxq->rx),

To continue with my suggestion on patch 1 of only pushing unpinned
queues into the rxqs array, you could add this rxq to rxqs[] ...

> @@ -5453,5 +5475,9 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,
>
>          if (rxq->core_id != OVS_CORE_UNSPEC) {
> -            continue;
> +             /* This rxq should have been pinned, check it was. */
> +            sched_pmd = find_sched_pmd_by_rxq(numa_list, rxq);
> +            if (sched_pmd && sched_pmd->pmd->core_id == rxq->core_id) {
> +                continue;
> +            }

... and this check is unneeded.

WDYT?
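
For illustration, a self-contained sketch of that reshuffle (stand-in
types, loosely following the patch's naming, not the real dpif-netdev
code): a queue whose pin fails is appended to the same rxqs[] list as
the non-pinned queues, so the later assignment loop needs no
find_sched_pmd_by_rxq() re-check.

    #include <stdio.h>

    #define CORE_UNSPEC ((unsigned) -1)

    struct rxq {
        const char *name;
        unsigned requested_core;
    };

    /* Stand-in for the pmd-cpu-mask lookup; assume pmds on cores 1 and 2. */
    static int
    core_has_pmd(unsigned core)
    {
        return core == 1 || core == 2;
    }

    int
    main(void)
    {
        struct rxq all[] = {
            { "p1 rxq 0", 99 },           /* pinned to an unavailable core */
            { "p1 rxq 1", CORE_UNSPEC },  /* not pinned at all */
        };
        struct rxq *rxqs[2];
        int n_rxqs = 0;

        /* Pinning pass: pin what we can, push everything else into rxqs[]. */
        for (int i = 0; i < 2; i++) {
            struct rxq *q = &all[i];

            if (q->requested_core != CORE_UNSPEC) {
                if (core_has_pmd(q->requested_core)) {
                    printf("%s pinned to core %u\n", q->name, q->requested_core);
                    continue;
                }
                printf("%s: core %u unavailable, falling back\n",
                       q->name, q->requested_core);
            }
            rxqs[n_rxqs++] = q;    /* same list as the non-pinned queues */
        }

        /* Common assignment pass: no "was it really pinned?" re-check. */
        for (int i = 0; i < n_rxqs; i++) {
            printf("%s assigned to a non-isolated pmd\n", rxqs[i]->name);
        }
        return 0;
    }

As noted in the reply below, taking this route also lets
find_sched_pmd_by_rxq() be dropped entirely.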

>          }
>
> diff --git a/tests/pmd.at b/tests/pmd.at
> index 78105bf45..55977632a 100644
> --- a/tests/pmd.at
> +++ b/tests/pmd.at
> @@ -552,7 +552,10 @@ AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=6])
>
>  dnl We removed the cores requested by some queues from pmd-cpu-mask.
> -dnl Those queues will not be polled.
> +dnl Those queues will be polled by remaining non-isolated pmds.
>  AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
> +p1 0 0 1
> +p1 1 0 1
>  p1 2 0 2
> +p1 3 0 1
>  ])
>
> --
> 2.31.1
>
Kevin Traynor July 1, 2021, 11:20 p.m. UTC | #3
On 24/06/2021 08:51, Pai G, Sunil wrote:
> Hey Kevin , 
> 
> Patch looks good to me. 
> Builds fine , all test cases here http://patchwork.ozlabs.org/project/openvswitch/patch/20210316154532.127858-1-ktraynor@redhat.com/  pass as well.
> 
> 
> # ovs-appctl dpif-netdev/pmd-rxq-show
> pmd thread numa_id 0 core_id 1:
>   isolated : false
>   port: dpdk0             queue-id:  0 (enabled)   pmd usage:  0 %
> pmd thread numa_id 0 core_id 2:
>   isolated : false
>   port: vhostuserclient0  queue-id:  0 (enabled)   pmd usage:  0 %
>   
> # $OVS_DIR/utilities/ovs-vsctl set interface vhostuserclient0 other_config:pmd-rxq-affinity="0:99"
> 
> # ovs-appctl dpif-netdev/pmd-rxq-show
> pmd thread numa_id 0 core_id 1:
>   isolated : false
>   port: dpdk0             queue-id:  0 (enabled)   pmd usage:  0 %
> pmd thread numa_id 0 core_id 2:
>   isolated : false
>   port: vhostuserclient0  queue-id:  0 (enabled)   pmd usage:  0 %
> 
> from vswitchd log:
> dpif_netdev|WARN|Core 99 cannot be pinned with port 'vhostuserclient0' rx queue 0. Use pmd-cpu-mask to enable a pmd on core 99. An alternative core will be assigned.
> dpif_netdev|INFO|Performing pmd to rx queue assignment using cycles algorithm.
> dpif_netdev|INFO|Core  1 on numa node 0 assigned port 'dpdk0' rx queue 0. (measured processing cycles 0).
> dpif_netdev|INFO|Core  2 on numa node 0 assigned port 'vhostuserclient0' rx queue 0. (measured processing cycles 0).
> 

Nice, thanks for reviewing and testing this and the other patches.

Kevin.

> 
> <snipped>
>
Kevin Traynor July 1, 2021, 11:21 p.m. UTC | #4
On 24/06/2021 16:04, David Marchand wrote:
> On Fri, Jun 4, 2021 at 11:19 PM Kevin Traynor <ktraynor@redhat.com> wrote:
>>
>> Previously, if pmd-rxq-affinity was used to pin an rxq to
>> a core that was not in pmd-cpu-mask the rxq was not polled
>> and the user received a warning.
>>
>> Now that pinned and non-pinned rxqs are assigned to PMDs in
>> a common call to rxq scheduling, if an invalid core is
>> selected in pmd-rxq-affinity the rxq can be assigned an
>> available PMD (if any).
>>
>> A warning will still be logged as the requested core could
>> not be used.
>>
>> Signed-off-by: Kevin Traynor <ktraynor@redhat.com>
>> ---
>>  Documentation/topics/dpdk/pmd.rst |  6 +++---
>>  lib/dpif-netdev.c                 | 30 ++++++++++++++++++++++++++++--
>>  tests/pmd.at                      |  5 ++++-
>>  3 files changed, 35 insertions(+), 6 deletions(-)
>>
>> diff --git a/Documentation/topics/dpdk/pmd.rst b/Documentation/topics/dpdk/pmd.rst
>> index d1c45cdfb..29ba53954 100644
>> --- a/Documentation/topics/dpdk/pmd.rst
>> +++ b/Documentation/topics/dpdk/pmd.rst
>> @@ -108,7 +108,7 @@ means that this thread will only poll the *pinned* Rx queues.
>>
>>     If there are no *non-isolated* PMD threads, *non-pinned* RX queues will not
>> -   be polled. Also, if the provided ``<core-id>`` is not available (e.g. the
>> -   ``<core-id>`` is not in ``pmd-cpu-mask``), the RX queue will not be polled
>> -   by any PMD thread.
>> +   be polled. If the provided ``<core-id>`` is not available (e.g. the
>> +   ``<core-id>`` is not in ``pmd-cpu-mask``), the RX queue will be assigned to
>> +   a *non-isolated* PMD, that will remain *non-isolated*.
>>
>>  If ``pmd-rxq-affinity`` is not set for Rx queues, they will be assigned to PMDs
>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
>> index 61e0a516f..377573233 100644
>> --- a/lib/dpif-netdev.c
>> +++ b/lib/dpif-netdev.c
>> @@ -5027,4 +5027,25 @@ find_sched_pmd_by_pmd(struct sched_numa_list *numa_list,
>>  }
>>
>> +static struct sched_pmd *
>> +find_sched_pmd_by_rxq(struct sched_numa_list *numa_list,
>> +                      struct dp_netdev_rxq *rxq)
>> +{
>> +    struct sched_numa *numa;
>> +
>> +    HMAP_FOR_EACH (numa, node, &numa_list->numas) {
>> +        for (unsigned i = 0; i < numa->n_pmds; i++) {
>> +            struct sched_pmd *sched_pmd;
>> +
>> +            sched_pmd = &numa->pmds[i];
>> +            for (int k = 0; k < sched_pmd->n_rxq; k++) {
>> +                if (sched_pmd->rxqs[k] == rxq) {
>> +                    return sched_pmd;
>> +                }
>> +            }
>> +        }
>> +    }
>> +    return NULL;
>> +}
>> +
>>  static struct sched_numa *
>>  sched_numa_list_find_numa(struct sched_numa_list *numa_list,
>> @@ -5408,5 +5429,6 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,
>>                              "Core %2u cannot be pinned with "
>>                              "port \'%s\' rx queue %d. Use pmd-cpu-mask to "
>> -                            "enable a pmd on core %u.",
>> +                            "enable a pmd on core %u. An alternative core "
>> +                            "will be assigned.",
>>                              rxq->core_id,
>>                              netdev_rxq_get_name(rxq->rx),
> 
> To continue with my suggestion on patch 1 on only pushing unpinned
> queues to the rxqs array, you could add this rxq in rxqs[] ...
> 
>> @@ -5453,5 +5475,9 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,
>>
>>          if (rxq->core_id != OVS_CORE_UNSPEC) {
>> -            continue;
>> +             /* This rxq should have been pinned, check it was. */
>> +            sched_pmd = find_sched_pmd_by_rxq(numa_list, rxq);
>> +            if (sched_pmd && sched_pmd->pmd->core_id == rxq->core_id) {
>> +                continue;
>> +            }
> 
> ... and this check is unnneded.
> 
> WDYT?
> 

OK, you've convinced me, and as a bonus I don't need
find_sched_pmd_by_rxq() anymore, so it simplifies the code here and
removes some other code. Thanks for the nice suggestion.

>>          }
>>
>> diff --git a/tests/pmd.at b/tests/pmd.at
>> index 78105bf45..55977632a 100644
>> --- a/tests/pmd.at
>> +++ b/tests/pmd.at
>> @@ -552,7 +552,10 @@ AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=6])
>>
>>  dnl We removed the cores requested by some queues from pmd-cpu-mask.
>> -dnl Those queues will not be polled.
>> +dnl Those queues will be polled by remaining non-isolated pmds.
>>  AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
>> +p1 0 0 1
>> +p1 1 0 1
>>  p1 2 0 2
>> +p1 3 0 1
>>  ])
>>
>> --
>> 2.31.1
>>
> 
> 
>

Patch

diff --git a/Documentation/topics/dpdk/pmd.rst b/Documentation/topics/dpdk/pmd.rst
index d1c45cdfb..29ba53954 100644
--- a/Documentation/topics/dpdk/pmd.rst
+++ b/Documentation/topics/dpdk/pmd.rst
@@ -108,7 +108,7 @@  means that this thread will only poll the *pinned* Rx queues.
 
    If there are no *non-isolated* PMD threads, *non-pinned* RX queues will not
-   be polled. Also, if the provided ``<core-id>`` is not available (e.g. the
-   ``<core-id>`` is not in ``pmd-cpu-mask``), the RX queue will not be polled
-   by any PMD thread.
+   be polled. If the provided ``<core-id>`` is not available (e.g. the
+   ``<core-id>`` is not in ``pmd-cpu-mask``), the RX queue will be assigned to
+   a *non-isolated* PMD, that will remain *non-isolated*.
 
 If ``pmd-rxq-affinity`` is not set for Rx queues, they will be assigned to PMDs
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 61e0a516f..377573233 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -5027,4 +5027,25 @@  find_sched_pmd_by_pmd(struct sched_numa_list *numa_list,
 }
 
+static struct sched_pmd *
+find_sched_pmd_by_rxq(struct sched_numa_list *numa_list,
+                      struct dp_netdev_rxq *rxq)
+{
+    struct sched_numa *numa;
+
+    HMAP_FOR_EACH (numa, node, &numa_list->numas) {
+        for (unsigned i = 0; i < numa->n_pmds; i++) {
+            struct sched_pmd *sched_pmd;
+
+            sched_pmd = &numa->pmds[i];
+            for (int k = 0; k < sched_pmd->n_rxq; k++) {
+                if (sched_pmd->rxqs[k] == rxq) {
+                    return sched_pmd;
+                }
+            }
+        }
+    }
+    return NULL;
+}
+
 static struct sched_numa *
 sched_numa_list_find_numa(struct sched_numa_list *numa_list,
@@ -5408,5 +5429,6 @@  sched_numa_list_schedule(struct sched_numa_list *numa_list,
                             "Core %2u cannot be pinned with "
                             "port \'%s\' rx queue %d. Use pmd-cpu-mask to "
-                            "enable a pmd on core %u.",
+                            "enable a pmd on core %u. An alternative core "
+                            "will be assigned.",
                             rxq->core_id,
                             netdev_rxq_get_name(rxq->rx),
@@ -5453,5 +5475,9 @@  sched_numa_list_schedule(struct sched_numa_list *numa_list,
 
         if (rxq->core_id != OVS_CORE_UNSPEC) {
-            continue;
+             /* This rxq should have been pinned, check it was. */
+            sched_pmd = find_sched_pmd_by_rxq(numa_list, rxq);
+            if (sched_pmd && sched_pmd->pmd->core_id == rxq->core_id) {
+                continue;
+            }
         }
 
diff --git a/tests/pmd.at b/tests/pmd.at
index 78105bf45..55977632a 100644
--- a/tests/pmd.at
+++ b/tests/pmd.at
@@ -552,7 +552,10 @@  AT_CHECK([ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=6])
 
 dnl We removed the cores requested by some queues from pmd-cpu-mask.
-dnl Those queues will not be polled.
+dnl Those queues will be polled by remaining non-isolated pmds.
 AT_CHECK([ovs-appctl dpif-netdev/pmd-rxq-show | parse_pmd_rxq_show], [0], [dnl
+p1 0 0 1
+p1 1 0 1
 p1 2 0 2
+p1 3 0 1
 ])