[ovs-dev,v3,2/5] dpif-netdev: Trigger parallel pmd reloads.

Message ID 1562241578-23180-3-git-send-email-david.marchand@redhat.com
State Changes Requested
Series Quicker pmd threads reloads

Commit Message

David Marchand July 4, 2019, 11:59 a.m. UTC
pmd reloads are currently serialised at each step that calls
reload_affected_pmds.
Any pmd busy processing packets, waiting on a mutex, etc. will make the
other pmd threads wait for a delay that can be non-deterministic when
syscalls add up.

Switch to a small busy loop on the control thread using the existing
per-pmd 'reload' boolean.

The memory order on this atomic is release-acquire so that there is an
explicit synchronisation between the pmd threads and the control thread.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
Changelog since v2:
- remove unneeded synchronisation on pmd thread join (Ilya)
- "inlined" synchronisation loop in reload_affected_pmds

Changelog since v1:
- removed the introduced reloading_pmds atomic and reuse the existing
  pmd->reload boolean as a single synchronisation point (Ilya)

Changelog since RFC v1:
- added memory ordering on 'reloading_pmds' atomic to serve as a
  synchronisation point between pmd threads and control thread

---
 lib/dpif-netdev.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)
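
For illustration, the release-acquire handshake described above reduces to
the following standalone sketch (plain C11 stdatomic and hypothetical
function names here; the patch itself uses the equivalent OVS wrappers
atomic_store_explicit()/atomic_read_explicit() from ovs-atomic.h):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool reload;

/* Control thread: request a reload, then busy-wait for completion. */
static void
request_reload_and_wait(void)
{
    /* Release: everything written before this store (e.g. the new polling
     * configuration) is visible to the pmd thread after its acquire load. */
    atomic_store_explicit(&reload, true, memory_order_release);

    /* Acquire: once 'reload' reads false, everything the pmd thread wrote
     * before clearing it is visible here. */
    while (atomic_load_explicit(&reload, memory_order_acquire)) {
        /* Busy loop; acceptable because reloads are short and rare. */
    }
}

/* pmd thread: notice the request, reload, then signal completion. */
static void
pmd_check_reload(void)
{
    if (atomic_load_explicit(&reload, memory_order_acquire)) {
        /* ... reapply the polling configuration ... */
        atomic_store_explicit(&reload, false, memory_order_release);
    }
}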

Comments

Ilya Maximets July 8, 2019, 2:55 p.m. UTC | #1
On 04.07.2019 14:59, David Marchand wrote:
> [...]
> @@ -4655,6 +4649,17 @@ reload_affected_pmds(struct dp_netdev *dp)
>          if (pmd->need_reload) {
>              flow_mark_flush(pmd);
>              dp_netdev_reload_pmd__(pmd);
> +        }
> +    }
> +
> +    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
> +        if (pmd->need_reload) {
> +            bool reload;
> +
> +            do {
> +                atomic_read_explicit(&pmd->reload, &reload,
> +                                     memory_order_acquire);
> +            } while (reload);

If we'll ever set 'need_reload' for non-PMD thread there will be
kind of deadlock. We never waited for non-PMD thread previously,
because dp_netdev_reload_pmd__() has a special case for it.

>              pmd->need_reload = false;
>          }
>      }
> [...]
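
The special case referred to above is the early return at the top of
dp_netdev_reload_pmd__() (visible as the bare "return;" context line in
the corresponding hunk of the patch below): the non-PMD thread is serviced
synchronously by the control thread itself, so its 'reload' flag is never
set. A rough paraphrase of that guard, with the locking details elided:

/* Non-PMD thread: reload its cached ports directly and return; 'reload'
 * is never set for it, so a wait loop on 'reload' would spin forever --
 * the "kind of deadlock" mentioned above. */
if (pmd->core_id == NON_PMD_CORE_ID) {
    pmd_load_cached_ports(pmd);   /* under the appropriate mutexes */
    return;
}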
Ilya Maximets July 8, 2019, 3:04 p.m. UTC | #2
On 08.07.2019 17:55, Ilya Maximets wrote:
> On 04.07.2019 14:59, David Marchand wrote:
>> [...]
> 
> If we'll ever set 'need_reload' for non-PMD thread there will be
> kind of deadlock. We never waited for non-PMD thread previously,
> because dp_netdev_reload_pmd__() has a special case for it.

You may try to disable emc for internal port to reproduce.

Best regards, Ilya Maximets.
David Marchand July 9, 2019, 7:14 a.m. UTC | #3
Hello Ilya,

On Mon, Jul 8, 2019 at 4:55 PM Ilya Maximets <i.maximets@samsung.com> wrote:

> On 04.07.2019 14:59, David Marchand wrote:
> > [...]
>
> If we'll ever set 'need_reload' for non-PMD thread there will be
> kind of deadlock. We never waited for non-PMD thread previously,
> because dp_netdev_reload_pmd__() has a special case for it.
>

We can skip the non-PMD thread in this loop for consistency with
dp_netdev_reload_pmd__() if this is what you meant.

But for a non-PMD thread, pmd->reload is set to false at init and is never
touched after this.
So in this case, this loop is supposed to break right away.
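
For reference, a minimal sketch of the skip being discussed, mirroring the
NON_PMD_CORE_ID special case in dp_netdev_reload_pmd__() (illustrative
only; the actual v4 may differ):

CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
    if (pmd->need_reload) {
        if (pmd->core_id != NON_PMD_CORE_ID) {
            bool reload;

            /* Busy-wait until the pmd thread clears 'reload' in
             * dp_netdev_pmd_reload_done(). */
            do {
                atomic_read_explicit(&pmd->reload, &reload,
                                     memory_order_acquire);
            } while (reload);
        }
        pmd->need_reload = false;
    }
}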
Ilya Maximets July 9, 2019, 7:35 a.m. UTC | #4
On 09.07.2019 10:14, David Marchand wrote:
> Hello Ilya,
> 
> On Mon, Jul 8, 2019 at 4:55 PM Ilya Maximets <i.maximets@samsung.com> wrote:
> 
>     On 04.07.2019 14:59, David Marchand wrote:
>     > [...]
> 
>     If we'll ever set 'need_reload' for non-PMD thread there will be
>     kind of deadlock. We never waited for non-PMD thread previously,
>     because dp_netdev_reload_pmd__() has a special case for it.
> 
> 
> We can skip the non-PMD thread in this loop for consistency with dp_netdev_reload_pmd__() if this is what you meant.
> 
> But for a non-PMD thread, pmd->reload is set to false at init and is never touched after this.
> So in this case, this loop is supposed to break right away.

Hmm. Yes, it seems that you're right. However, I think it's better not to
wait for the non-PMD thread anyway, for consistency and better readability.

Best regards, Ilya Maximets.
David Marchand July 9, 2019, 8 a.m. UTC | #5
On Tue, Jul 9, 2019 at 9:36 AM Ilya Maximets <i.maximets@samsung.com> wrote:

> On 09.07.2019 10:14, David Marchand wrote:
> > We can skip the non-PMD thread in this loop for consistency with
> > dp_netdev_reload_pmd__() if this is what you meant.
> >
> > But for a non-PMD thread, pmd->reload is set to false at init and is
> > never touched after this.
> > So in this case, this loop is supposed to break right away.
>
> Hmm. Yes, it seems that you're right. However, I think it's better not to
> wait for the non-PMD thread anyway, for consistency and better readability.
>

I agree, skipping the non-PMD thread leaves no question about what to expect
from the code.
I will prepare a v4.

Is there anything else to rework in this series?

I will be on PTO from Thursday evening for 10 days, so I'd like to address
any remaining comments by Thursday morning.
Ilya Maximets July 9, 2019, 10:17 a.m. UTC | #6
On 09.07.2019 11:00, David Marchand wrote:
> 
> 
> On Tue, Jul 9, 2019 at 9:36 AM Ilya Maximets <i.maximets@samsung.com> wrote:
> 
>     On 09.07.2019 10:14, David Marchand wrote:
>     > We can skip the non-PMD thread in this loop for consistency with dp_netdev_reload_pmd__() if this is what you meant.
>     >
>     > But for a non-PMD thread, pmd->reload is set to false at init and is never touched after this.
>     > So in this case, this loop is supposed to break right away.
> 
>     Hmm. Yes, it seems that you're right. However, I think it's better not
>     to wait for the non-PMD thread anyway, for consistency and better
>     readability.
> 
> 
> I agree, skipping the non-PMD thread leaves no question about what to expect from the code.
> I will prepare a v4.
> 
> Is there anything else to rework in this series?

All other code looks fine. Looking forward to v4.

Stokes, Ian July 9, 2019, 10:21 a.m. UTC | #7
> On 09.07.2019 11:00, David Marchand wrote:
> > [...]
> >
> > Is there anything else to rework in this series?
> 
> All other code looks fine. Looking forward to v4.
> 

+1, I ran some validation tests yesterday on the new patches and didn't see any issues on my side. With the changes for v4 flagged by Ilya above, it should be OK.

Regards
Ian

Patch

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 8eeec63..415107b 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -649,9 +649,6 @@  struct dp_netdev_pmd_thread {
     struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
     struct cmap_node node;          /* In 'dp->poll_threads'. */
 
-    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
-    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */
-
     /* Per thread exact-match cache.  Note, the instance for cpu core
      * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
      * need to be protected by 'non_pmd_mutex'.  Every other instance
@@ -1758,11 +1755,8 @@  dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
         return;
     }
 
-    ovs_mutex_lock(&pmd->cond_mutex);
     seq_change(pmd->reload_seq);
     atomic_store_explicit(&pmd->reload, true, memory_order_release);
-    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
-    ovs_mutex_unlock(&pmd->cond_mutex);
 }
 
 static uint32_t
@@ -4655,6 +4649,17 @@  reload_affected_pmds(struct dp_netdev *dp)
         if (pmd->need_reload) {
             flow_mark_flush(pmd);
             dp_netdev_reload_pmd__(pmd);
+        }
+    }
+
+    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+        if (pmd->need_reload) {
+            bool reload;
+
+            do {
+                atomic_read_explicit(&pmd->reload, &reload,
+                                     memory_order_acquire);
+            } while (reload);
             pmd->need_reload = false;
         }
     }
@@ -5842,11 +5847,8 @@  dpif_netdev_enable_upcall(struct dpif *dpif)
 static void
 dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
 {
-    ovs_mutex_lock(&pmd->cond_mutex);
-    atomic_store_relaxed(&pmd->reload, false);
     pmd->last_reload_seq = seq_read(pmd->reload_seq);
-    xpthread_cond_signal(&pmd->cond);
-    ovs_mutex_unlock(&pmd->cond_mutex);
+    atomic_store_explicit(&pmd->reload, false, memory_order_release);
 }
 
 /* Finds and refs the dp_netdev_pmd_thread on core 'core_id'.  Returns
@@ -5931,8 +5933,6 @@  dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     pmd->reload_seq = seq_create();
     pmd->last_reload_seq = seq_read(pmd->reload_seq);
     atomic_init(&pmd->reload, false);
-    xpthread_cond_init(&pmd->cond, NULL);
-    ovs_mutex_init(&pmd->cond_mutex);
     ovs_mutex_init(&pmd->flow_mutex);
     ovs_mutex_init(&pmd->port_mutex);
     cmap_init(&pmd->flow_table);
@@ -5975,8 +5975,6 @@  dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
     cmap_destroy(&pmd->flow_table);
     ovs_mutex_destroy(&pmd->flow_mutex);
     seq_destroy(pmd->reload_seq);
-    xpthread_cond_destroy(&pmd->cond);
-    ovs_mutex_destroy(&pmd->cond_mutex);
     ovs_mutex_destroy(&pmd->port_mutex);
     free(pmd);
 }
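
Taken together, the final protocol reads as follows (a condensed view of
the two sides of the handshake, extracted from the functions above for
convenience; not part of the patch itself):

/* Control thread (dp_netdev_reload_pmd__ + reload_affected_pmds): */
seq_change(pmd->reload_seq);
atomic_store_explicit(&pmd->reload, true, memory_order_release);
/* ... later, for each pmd with 'need_reload' still set ... */
do {
    atomic_read_explicit(&pmd->reload, &reload, memory_order_acquire);
} while (reload);

/* pmd thread (dp_netdev_pmd_reload_done): 'last_reload_seq' is updated
 * before the store-release, so it is guaranteed visible to the control
 * thread by the time 'reload' reads false. */
pmd->last_reload_seq = seq_read(pmd->reload_seq);
atomic_store_explicit(&pmd->reload, false, memory_order_release);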