diff mbox series

[ovs-dev,RFC,ovn,3/5] Remove multithreading from pinctrl.

Message ID 20191018204259.1113-4-mmichels@redhat.com
State Deferred
Headers show
Series Separate pinctrl to its own process | expand

Commit Message

Mark Michelson Oct. 18, 2019, 8:42 p.m. UTC
This more-or-less gets pinctrl back to how it was prior to being made
multithreaded for OVS 2.11. Since pinctrl is in its own process, there's
no immediate need to have multiple threads.

Signed-off-by: Mark Michelson <mmichels@redhat.com>
---
 controller/pinctrl.c | 736 +++++++++++++--------------------------------------
 1 file changed, 190 insertions(+), 546 deletions(-)
diff mbox series

Patch

diff --git a/controller/pinctrl.c b/controller/pinctrl.c
index d826da186..edf6ccfb0 100644
--- a/controller/pinctrl.c
+++ b/controller/pinctrl.c
@@ -60,199 +60,84 @@ 
 
 VLOG_DEFINE_THIS_MODULE(pinctrl);
 
-/* pinctrl module creates a thread - pinctrl_handler to handle
- * the packet-ins from ovs-vswitchd. Some of the OVN actions
- * are translated to OF 'controller' actions. See include/ovn/actions.h
- * for more details.
- *
- * pinctrl_handler thread doesn't access the Southbound IDL object. But
- * some of the OVN actions which gets translated to 'controller'
- * OF action, require data from Southbound DB.  Below are the details
- * on how these actions are implemented.
- *
- * pinctrl_run() function is called by ovn-controller main thread.
- * A Mutex - 'pinctrl_mutex' is used between the pinctrl_handler() thread
- * and pinctrl_run().
- *
- *   - dns_lookup -     In order to do a DNS lookup, this action needs
- *                      to access the 'DNS' table. pinctrl_run() builds a
- *                      local DNS cache - 'dns_cache'. See sync_dns_cache()
- *                      for more details.
- *                      The function 'pinctrl_handle_dns_lookup()' (which is
- *                      called with in the pinctrl_handler thread) looks into
- *                      the local DNS cache to resolve the DNS requests.
- *
- *   - put_arp/put_nd - These actions stores the IPv4/IPv6 and MAC addresses
- *                      in the 'MAC_Binding' table.
- *                      The function 'pinctrl_handle_put_mac_binding()' (which
- *                      is called with in the pinctrl_handler thread), stores
- *                      the IPv4/IPv6 and MAC addresses in the
- *                      hmap - put_mac_bindings.
- *
- *                      pinctrl_run(), reads these mac bindings from the hmap
- *                      'put_mac_bindings' and writes to the 'MAC_Binding'
- *                      table in the Southbound DB.
- *
- *   - arp/nd_ns      - These actions generate an ARP/IPv6 Neighbor solicit
- *                      requests. The original packets are buffered and
- *                      injected back when put_arp/put_nd resolves
- *                      corresponding ARP/IPv6 Neighbor solicit requests.
- *                      When pinctrl_run(), writes the mac bindings from the
- *                      'put_mac_bindings' hmap to the MAC_Binding table in
- *                      SB DB, run_buffered_binding will add the buffered
- *                      packets to buffered_mac_bindings and notify
- *                      pinctrl_handler.
- *
- *                      The pinctrl_handler thread calls the function -
- *                      send_mac_binding_buffered_pkts(), which uses
- *                      the hmap - 'buffered_mac_bindings' and reinjects the
- *                      buffered packets.
- *
- *    - igmp          - This action punts an IGMP packet to the controller
- *                      which maintains multicast group information. The
- *                      multicast groups (mcast_snoop_map) are synced to
- *                      the 'IGMP_Group' table by ip_mcast_sync().
- *                      ip_mcast_sync() also reads the 'IP_Multicast'
- *                      (snooping and querier) configuration and builds a
- *                      local configuration mcast_cfg_map.
- *                      ip_mcast_snoop_run() which runs in the
- *                      pinctrl_handler() thread configures the per datapath
- *                      mcast_snoop_map entries according to mcast_cfg_map.
- *
- * pinctrl module also periodically sends IPv6 Router Solicitation requests
- * and gARPs (for the router gateway IPs and configured NAT addresses).
- *
- * IPv6 RA handling - pinctrl_run() prepares the IPv6 RA information
- *                    (see prepare_ipv6_ras()) in the shash 'ipv6_ras' by
- *                    looking into the Southbound DB table - Port_Binding.
- *
- *                    pinctrl_handler thread sends the periodic IPv6 RAs using
- *                    the shash - 'ipv6_ras'
- *
- * g/rARP handling    - pinctrl_run() prepares the g/rARP information
- *                     (see send_garp_rarp_prepare()) in the shash
- *                     'send_garp_rarp_data' by looking into the
- *                     Southbound DB table Port_Binding.
- *                     pinctrl_handler() thread sends these gARPs using the
- *                     shash 'send_garp_rarp_data'.
- *
- * IGMP Queries     - pinctrl_run() prepares the IGMP queries (at most one
- *                    per local datapath) based on the mcast_snoop_map
- *                    contents and stores them in mcast_query_list.
- *
- *                    pinctrl_handler thread sends the periodic IGMP queries
- *                    by walking the mcast_query_list.
- *
- * Notification between pinctrl_handler() and pinctrl_run()
- * -------------------------------------------------------
- * 'struct seq' is used for notification between pinctrl_handler() thread
- *  and pinctrl_run().
- *  'pinctrl_handler_seq' is used by pinctrl_run() to
- *  wake up pinctrl_handler thread from poll_block() if any changes happened
- *  in 'send_garp_rarp_data', 'ipv6_ras' and 'buffered_mac_bindings'
- *  structures.
- *
- *  'pinctrl_main_seq' is used by pinctrl_handler() thread to wake up
- *  the main thread from poll_block() when mac bindings/igmp groups need to
- *  be updated in the Southboubd DB.
- * */
-
-static struct ovs_mutex pinctrl_mutex = OVS_MUTEX_INITIALIZER;
-static struct seq *pinctrl_handler_seq;
-static struct seq *pinctrl_main_seq;
-
-static void *pinctrl_handler(void *arg);
-
-struct pinctrl {
-    char *br_int_name;
-    pthread_t pinctrl_thread;
-    /* Latch to destroy the 'pinctrl_thread' */
-    struct latch pinctrl_thread_exit;
-};
+/* OpenFlow connection to the switch. */
+static struct rconn *swconn;
 
-static struct pinctrl pinctrl;
+/* Last seen sequence number for 'swconn'.  When this differs from
+ * rconn_get_connection_seqno(swconn), 'swconn' has reconnected. */
+static unsigned int conn_seq_no = 0;
+
+/* Next IPV6 RA in seconds. */
+static long long int send_ipv6_ra_time = LLONG_MAX;
+/* Next GARP/RARP announcement in ms. */
+static long long int send_garp_rarp_time = LLONG_MAX;
+/* Next multicast query (IGMP) in ms. */
+static long long int send_mcast_query_time = LLONG_MAX;
 
 static void init_buffered_packets_map(void);
 static void destroy_buffered_packets_map(void);
-static void
-run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
-                     const struct hmap *local_datapaths)
-    OVS_REQUIRES(pinctrl_mutex);
+static void run_buffered_binding(
+        struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
+        const struct hmap *local_datapaths);
 
 static void pinctrl_handle_put_mac_binding(const struct flow *md,
                                            const struct flow *headers,
-                                           bool is_arp)
-    OVS_REQUIRES(pinctrl_mutex);
+                                           bool is_arp);
 static void init_put_mac_bindings(void);
 static void destroy_put_mac_bindings(void);
 static void run_put_mac_bindings(
     struct ovsdb_idl_txn *ovnsb_idl_txn,
     struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
     struct ovsdb_idl_index *sbrec_port_binding_by_key,
-    struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip)
-    OVS_REQUIRES(pinctrl_mutex);
+    struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip);
 static void wait_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn);
 static void flush_put_mac_bindings(void);
-static void send_mac_binding_buffered_pkts(struct rconn *swconn)
-    OVS_REQUIRES(pinctrl_mutex);
+static void send_mac_binding_buffered_pkts(void);
 
 static void init_send_garps_rarps(void);
 static void destroy_send_garps_rarps(void);
-static void send_garp_rarp_wait(long long int send_garp_rarp_time);
+static void send_garp_rarp_wait(void);
 static void send_garp_rarp_prepare(
     struct ovsdb_idl_index *sbrec_port_binding_by_datapath,
     struct ovsdb_idl_index *sbrec_port_binding_by_name,
     const struct ovsrec_bridge *,
     const struct sbrec_chassis *,
     const struct hmap *local_datapaths,
-    const struct sset *active_tunnels)
-    OVS_REQUIRES(pinctrl_mutex);
-static void send_garp_rarp_run(struct rconn *swconn,
-                               long long int *send_garp_rarp_time)
-    OVS_REQUIRES(pinctrl_mutex);
-static void pinctrl_handle_nd_na(struct rconn *swconn,
-                                 const struct flow *ip_flow,
+    const struct sset *active_tunnels);
+static void send_garp_rarp_run(void);
+static void pinctrl_handle_nd_na(const struct flow *ip_flow,
                                  const struct match *md,
                                  struct ofpbuf *userdata,
                                  bool is_router);
 static void reload_metadata(struct ofpbuf *ofpacts,
                             const struct match *md);
 static void pinctrl_handle_put_nd_ra_opts(
-    struct rconn *swconn,
     const struct flow *ip_flow, struct dp_packet *pkt_in,
     struct ofputil_packet_in *pin, struct ofpbuf *userdata,
     struct ofpbuf *continuation);
-static void pinctrl_handle_nd_ns(struct rconn *swconn,
-                                 const struct flow *ip_flow,
+static void pinctrl_handle_nd_ns(const struct flow *ip_flow,
                                  struct dp_packet *pkt_in,
                                  const struct match *md,
                                  struct ofpbuf *userdata);
-static void pinctrl_handle_put_icmp4_frag_mtu(struct rconn *swconn,
-                                              const struct flow *in_flow,
+static void pinctrl_handle_put_icmp4_frag_mtu(const struct flow *in_flow,
                                               struct dp_packet *pkt_in,
                                               struct ofputil_packet_in *pin,
                                               struct ofpbuf *userdata,
                                               struct ofpbuf *continuation);
 static void
-pinctrl_handle_event(struct ofpbuf *userdata)
-    OVS_REQUIRES(pinctrl_mutex);
+pinctrl_handle_event(struct ofpbuf *userdata);
 static void wait_controller_event(struct ovsdb_idl_txn *ovnsb_idl_txn);
 static void init_ipv6_ras(void);
 static void destroy_ipv6_ras(void);
-static void ipv6_ra_wait(long long int send_ipv6_ra_time);
-static void prepare_ipv6_ras(const struct hmap *local_datapaths)
-    OVS_REQUIRES(pinctrl_mutex);
-static void send_ipv6_ras(struct rconn *swconn,
-                          long long int *send_ipv6_ra_time)
-    OVS_REQUIRES(pinctrl_mutex);
+static void ipv6_ra_wait(void);
+static void prepare_ipv6_ras(const struct hmap *local_datapaths);
+static void send_ipv6_ras(void);
 
 static void ip_mcast_snoop_init(void);
 static void ip_mcast_snoop_destroy(void);
-static void ip_mcast_snoop_run(void)
-    OVS_REQUIRES(pinctrl_mutex);
-static void ip_mcast_querier_run(struct rconn *swconn,
-                                 long long int *query_time);
-static void ip_mcast_querier_wait(long long int query_time);
+static void ip_mcast_snoop_run(void);
+static void ip_mcast_querier_run(void);
+static void ip_mcast_querier_wait(void);
 static void ip_mcast_sync(
     struct ovsdb_idl_txn *ovnsb_idl_txn,
     const struct sbrec_chassis *chassis,
@@ -260,10 +145,8 @@  static void ip_mcast_sync(
     struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
     struct ovsdb_idl_index *sbrec_port_binding_by_key,
     struct ovsdb_idl_index *sbrec_igmp_groups,
-    struct ovsdb_idl_index *sbrec_ip_multicast)
-    OVS_REQUIRES(pinctrl_mutex);
+    struct ovsdb_idl_index *sbrec_ip_multicast);
 static void pinctrl_ip_mcast_handle_igmp(
-    struct rconn *swconn,
     const struct flow *ip_flow,
     struct dp_packet *pkt_in,
     const struct match *md,
@@ -277,8 +160,7 @@  static void run_put_vport_bindings(
     struct ovsdb_idl_txn *ovnsb_idl_txn,
     struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
     struct ovsdb_idl_index *sbrec_port_binding_by_key,
-    const struct sbrec_chassis *chassis)
-    OVS_REQUIRES(pinctrl_mutex);
+    const struct sbrec_chassis *chassis);
 static void wait_put_vport_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn);
 static void pinctrl_handle_bind_vport(const struct flow *md,
                                       struct ofpbuf *userdata);
@@ -402,7 +284,6 @@  static void
 controller_event_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
                      const struct sbrec_controller_event_table *ce_table,
                      const struct sbrec_chassis *chassis)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     if (!ovnsb_idl_txn) {
         goto out;
@@ -437,6 +318,7 @@  out:
 void
 pinctrl_init(void)
 {
+    swconn = rconn_create(5, 0, DSCP_DEFAULT, 1 << OFP13_VERSION);
     init_put_mac_bindings();
     init_send_garps_rarps();
     init_ipv6_ras();
@@ -444,17 +326,10 @@  pinctrl_init(void)
     init_event_table();
     ip_mcast_snoop_init();
     init_put_vport_bindings();
-    pinctrl.br_int_name = NULL;
-    pinctrl_handler_seq = seq_create();
-    pinctrl_main_seq = seq_create();
-
-    latch_init(&pinctrl.pinctrl_thread_exit);
-    pinctrl.pinctrl_thread = ovs_thread_create("ovn_pinctrl", pinctrl_handler,
-                                                &pinctrl);
 }
 
 static ovs_be32
-queue_msg(struct rconn *swconn, struct ofpbuf *msg)
+queue_msg(struct ofpbuf *msg)
 {
     const struct ofp_header *oh = msg->data;
     ovs_be32 xid = oh->xid;
@@ -465,32 +340,29 @@  queue_msg(struct rconn *swconn, struct ofpbuf *msg)
 
 /* Sets up 'swconn', a newly (re)connected connection to a switch. */
 static void
-pinctrl_setup(struct rconn *swconn)
+pinctrl_setup(void)
 {
     /* Fetch the switch configuration.  The response later will allow us to
      * change the miss_send_len to UINT16_MAX, so that we can enable
      * asynchronous messages. */
-    queue_msg(swconn, ofpraw_alloc(OFPRAW_OFPT_GET_CONFIG_REQUEST,
-                                   rconn_get_version(swconn), 0));
+    queue_msg(ofpraw_alloc(OFPRAW_OFPT_GET_CONFIG_REQUEST,
+              rconn_get_version(swconn), 0));
 
     /* Set a packet-in format that supports userdata.  */
-    queue_msg(swconn,
-              ofputil_encode_set_packet_in_format(rconn_get_version(swconn),
+    queue_msg(ofputil_encode_set_packet_in_format(rconn_get_version(swconn),
                                                   OFPUTIL_PACKET_IN_NXT2));
 }
 
 static void
-set_switch_config(struct rconn *swconn,
-                  const struct ofputil_switch_config *config)
+set_switch_config(const struct ofputil_switch_config *config)
 {
     enum ofp_version version = rconn_get_version(swconn);
     struct ofpbuf *request = ofputil_encode_set_config(config, version);
-    queue_msg(swconn, request);
+    queue_msg(request);
 }
 
 static void
-set_actions_and_enqueue_msg(struct rconn *swconn,
-                            const struct dp_packet *packet,
+set_actions_and_enqueue_msg(const struct dp_packet *packet,
                             const struct match *md,
                             struct ofpbuf *userdata)
 {
@@ -522,7 +394,7 @@  set_actions_and_enqueue_msg(struct rconn *swconn,
     };
     match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER);
     enum ofputil_protocol proto = ofputil_protocol_from_ofp_version(version);
-    queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
+    queue_msg(ofputil_encode_packet_out(&po, proto));
     ofpbuf_uninit(&ofpacts);
 }
 
@@ -614,7 +486,7 @@  buffered_push_packet(struct buffered_packets *bp,
 }
 
 static void
-buffered_send_packets(struct rconn *swconn, struct buffered_packets *bp,
+buffered_send_packets(struct buffered_packets *bp,
                       struct eth_addr *addr)
 {
     enum ofp_version version = rconn_get_version(swconn);
@@ -633,7 +505,7 @@  buffered_send_packets(struct rconn *swconn, struct buffered_packets *bp,
             .ofpacts_len = bi->ofpacts.size,
         };
         match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER);
-        queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
+        queue_msg(ofputil_encode_packet_out(&po, proto));
 
         ofpbuf_uninit(&bi->ofpacts);
         dp_packet_delete(bi->p);
@@ -672,12 +544,10 @@  pinctrl_find_buffered_packets(const struct in6_addr *ip, uint32_t hash)
     return NULL;
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static int
 pinctrl_handle_buffered_packets(const struct flow *ip_flow,
                                 struct dp_packet *pkt_in,
                                 const struct match *md, bool is_arp)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     struct buffered_packets *bp;
     struct dp_packet *clone;
@@ -711,9 +581,8 @@  pinctrl_handle_buffered_packets(const struct flow *ip_flow,
     return 0;
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-pinctrl_handle_arp(struct rconn *swconn, const struct flow *ip_flow,
+pinctrl_handle_arp(const struct flow *ip_flow,
                    struct dp_packet *pkt_in,
                    const struct match *md, struct ofpbuf *userdata)
 {
@@ -726,9 +595,7 @@  pinctrl_handle_arp(struct rconn *swconn, const struct flow *ip_flow,
         return;
     }
 
-    ovs_mutex_lock(&pinctrl_mutex);
     pinctrl_handle_buffered_packets(ip_flow, pkt_in, md, true);
-    ovs_mutex_unlock(&pinctrl_mutex);
 
     /* Compose an ARP packet. */
     uint64_t packet_stub[128 / 8];
@@ -752,13 +619,12 @@  pinctrl_handle_arp(struct rconn *swconn, const struct flow *ip_flow,
                       ip_flow->vlans[0].tci);
     }
 
-    set_actions_and_enqueue_msg(swconn, &packet, md, userdata);
+    set_actions_and_enqueue_msg(&packet, md, userdata);
     dp_packet_uninit(&packet);
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-pinctrl_handle_icmp(struct rconn *swconn, const struct flow *ip_flow,
+pinctrl_handle_icmp(const struct flow *ip_flow,
                     struct dp_packet *pkt_in,
                     const struct match *md, struct ofpbuf *userdata,
                     bool include_orig_ip_datagram)
@@ -871,13 +737,12 @@  pinctrl_handle_icmp(struct rconn *swconn, const struct flow *ip_flow,
                       ip_flow->vlans[0].tci);
     }
 
-    set_actions_and_enqueue_msg(swconn, &packet, md, userdata);
+    set_actions_and_enqueue_msg(&packet, md, userdata);
     dp_packet_uninit(&packet);
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-pinctrl_handle_tcp_reset(struct rconn *swconn, const struct flow *ip_flow,
+pinctrl_handle_tcp_reset(const struct flow *ip_flow,
                          struct dp_packet *pkt_in,
                          const struct match *md, struct ofpbuf *userdata)
 {
@@ -945,14 +810,12 @@  pinctrl_handle_tcp_reset(struct rconn *swconn, const struct flow *ip_flow,
                       ip_flow->vlans[0].tci);
     }
 
-    set_actions_and_enqueue_msg(swconn, &packet, md, userdata);
+    set_actions_and_enqueue_msg(&packet, md, userdata);
     dp_packet_uninit(&packet);
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
 pinctrl_handle_put_dhcp_opts(
-    struct rconn *swconn,
     struct dp_packet *pkt_in, struct ofputil_packet_in *pin,
     struct ofpbuf *userdata, struct ofpbuf *continuation)
 {
@@ -1198,7 +1061,7 @@  exit:
         sv.u8_val = success;
         mf_write_subfield(&dst, &sv, &pin->flow_metadata);
     }
-    queue_msg(swconn, ofputil_encode_resume(pin, continuation, proto));
+    queue_msg(ofputil_encode_resume(pin, continuation, proto));
     if (pkt_out_ptr) {
         dp_packet_uninit(pkt_out_ptr);
     }
@@ -1315,10 +1178,8 @@  compose_out_dhcpv6_opts(struct ofpbuf *userdata,
     return true;
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
 pinctrl_handle_put_dhcpv6_opts(
-    struct rconn *swconn,
     struct dp_packet *pkt_in, struct ofputil_packet_in *pin,
     struct ofpbuf *userdata, struct ofpbuf *continuation OVS_UNUSED)
 {
@@ -1499,7 +1360,7 @@  exit:
         sv.u8_val = success;
         mf_write_subfield(&dst, &sv, &pin->flow_metadata);
     }
-    queue_msg(swconn, ofputil_encode_resume(pin, continuation, proto));
+    queue_msg(ofputil_encode_resume(pin, continuation, proto));
     dp_packet_uninit(pkt_out_ptr);
 }
 
@@ -1524,11 +1385,8 @@  struct dns_data {
 
 static struct shash dns_cache = SHASH_INITIALIZER(&dns_cache);
 
-/* Called by pinctrl_run(). Runs within the main ovn-controller
- * thread context. */
 static void
 sync_dns_cache(const struct sbrec_dns_table *dns_table)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     struct shash_node *iter;
     SHASH_FOR_EACH (iter, &dns_cache) {
@@ -1589,13 +1447,10 @@  destroy_dns_cache(void)
     }
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
 pinctrl_handle_dns_lookup(
-    struct rconn *swconn,
     struct dp_packet *pkt_in, struct ofputil_packet_in *pin,
     struct ofpbuf *userdata, struct ofpbuf *continuation)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
     enum ofp_version version = rconn_get_version(swconn);
@@ -1847,13 +1702,12 @@  exit:
         sv.u8_val = success;
         mf_write_subfield(&dst, &sv, &pin->flow_metadata);
     }
-    queue_msg(swconn, ofputil_encode_resume(pin, continuation, proto));
+    queue_msg(ofputil_encode_resume(pin, continuation, proto));
     dp_packet_uninit(pkt_out_ptr);
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
+process_packet_in(const struct ofp_header *msg)
 {
     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
@@ -1886,53 +1740,47 @@  process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
 
     switch (ntohl(ah->opcode)) {
     case ACTION_OPCODE_ARP:
-        pinctrl_handle_arp(swconn, &headers, &packet, &pin.flow_metadata,
+        pinctrl_handle_arp(&headers, &packet, &pin.flow_metadata,
                            &userdata);
         break;
     case ACTION_OPCODE_IGMP:
-        pinctrl_ip_mcast_handle_igmp(swconn, &headers, &packet,
+        pinctrl_ip_mcast_handle_igmp(&headers, &packet,
                                      &pin.flow_metadata, &userdata);
         break;
 
     case ACTION_OPCODE_PUT_ARP:
-        ovs_mutex_lock(&pinctrl_mutex);
         pinctrl_handle_put_mac_binding(&pin.flow_metadata.flow, &headers,
                                        true);
-        ovs_mutex_unlock(&pinctrl_mutex);
         break;
 
     case ACTION_OPCODE_PUT_DHCP_OPTS:
-        pinctrl_handle_put_dhcp_opts(swconn, &packet, &pin, &userdata,
+        pinctrl_handle_put_dhcp_opts(&packet, &pin, &userdata,
                                      &continuation);
         break;
 
     case ACTION_OPCODE_ND_NA:
-        pinctrl_handle_nd_na(swconn, &headers, &pin.flow_metadata, &userdata,
+        pinctrl_handle_nd_na(&headers, &pin.flow_metadata, &userdata,
                              false);
         break;
 
     case ACTION_OPCODE_ND_NA_ROUTER:
-        pinctrl_handle_nd_na(swconn, &headers, &pin.flow_metadata, &userdata,
+        pinctrl_handle_nd_na(&headers, &pin.flow_metadata, &userdata,
                              true);
         break;
 
     case ACTION_OPCODE_PUT_ND:
-        ovs_mutex_lock(&pinctrl_mutex);
         pinctrl_handle_put_mac_binding(&pin.flow_metadata.flow, &headers,
                                        false);
-        ovs_mutex_unlock(&pinctrl_mutex);
         break;
 
     case ACTION_OPCODE_PUT_DHCPV6_OPTS:
-        pinctrl_handle_put_dhcpv6_opts(swconn, &packet, &pin, &userdata,
+        pinctrl_handle_put_dhcpv6_opts(&packet, &pin, &userdata,
                                        &continuation);
         break;
 
     case ACTION_OPCODE_DNS_LOOKUP:
-        ovs_mutex_lock(&pinctrl_mutex);
-        pinctrl_handle_dns_lookup(swconn, &packet, &pin, &userdata,
+        pinctrl_handle_dns_lookup(&packet, &pin, &userdata,
                                   &continuation);
-        ovs_mutex_unlock(&pinctrl_mutex);
         break;
 
     case ACTION_OPCODE_LOG:
@@ -1940,45 +1788,41 @@  process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
         break;
 
     case ACTION_OPCODE_PUT_ND_RA_OPTS:
-        pinctrl_handle_put_nd_ra_opts(swconn, &headers, &packet, &pin,
+        pinctrl_handle_put_nd_ra_opts(&headers, &packet, &pin,
                                       &userdata, &continuation);
         break;
 
     case ACTION_OPCODE_ND_NS:
-        pinctrl_handle_nd_ns(swconn, &headers, &packet, &pin.flow_metadata,
+        pinctrl_handle_nd_ns(&headers, &packet, &pin.flow_metadata,
                              &userdata);
         break;
 
     case ACTION_OPCODE_ICMP:
-        pinctrl_handle_icmp(swconn, &headers, &packet, &pin.flow_metadata,
+        pinctrl_handle_icmp(&headers, &packet, &pin.flow_metadata,
                             &userdata, false);
         break;
 
     case ACTION_OPCODE_ICMP4_ERROR:
-        pinctrl_handle_icmp(swconn, &headers, &packet, &pin.flow_metadata,
+        pinctrl_handle_icmp(&headers, &packet, &pin.flow_metadata,
                             &userdata, true);
         break;
 
     case ACTION_OPCODE_TCP_RESET:
-        pinctrl_handle_tcp_reset(swconn, &headers, &packet, &pin.flow_metadata,
+        pinctrl_handle_tcp_reset(&headers, &packet, &pin.flow_metadata,
                                  &userdata);
         break;
 
     case ACTION_OPCODE_PUT_ICMP4_FRAG_MTU:
-        pinctrl_handle_put_icmp4_frag_mtu(swconn, &headers, &packet,
+        pinctrl_handle_put_icmp4_frag_mtu(&headers, &packet,
                                           &pin, &userdata, &continuation);
         break;
 
     case ACTION_OPCODE_EVENT:
-        ovs_mutex_lock(&pinctrl_mutex);
         pinctrl_handle_event(&userdata);
-        ovs_mutex_unlock(&pinctrl_mutex);
         break;
 
     case ACTION_OPCODE_BIND_VPORT:
-        ovs_mutex_lock(&pinctrl_mutex);
         pinctrl_handle_bind_vport(&pin.flow_metadata.flow, &userdata);
-        ovs_mutex_unlock(&pinctrl_mutex);
         break;
 
     default:
@@ -1988,22 +1832,21 @@  process_packet_in(struct rconn *swconn, const struct ofp_header *msg)
     }
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-pinctrl_recv(struct rconn *swconn, const struct ofp_header *oh,
+pinctrl_recv(const struct ofp_header *oh,
              enum ofptype type)
 {
     if (type == OFPTYPE_ECHO_REQUEST) {
-        queue_msg(swconn, ofputil_encode_echo_reply(oh));
+        queue_msg(ofputil_encode_echo_reply(oh));
     } else if (type == OFPTYPE_GET_CONFIG_REPLY) {
         /* Enable asynchronous messages */
         struct ofputil_switch_config config;
 
         ofputil_decode_get_config_reply(oh, &config);
         config.miss_send_len = UINT16_MAX;
-        set_switch_config(swconn, &config);
+        set_switch_config(&config);
     } else if (type == OFPTYPE_PACKET_IN) {
-        process_packet_in(swconn, oh);
+        process_packet_in(oh);
     } else {
         if (VLOG_IS_DBG_ENABLED()) {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
@@ -2016,118 +1859,6 @@  pinctrl_recv(struct rconn *swconn, const struct ofp_header *oh,
     }
 }
 
-/* Called with in the main ovn-controller thread context. */
-static void
-notify_pinctrl_handler(void)
-{
-    seq_change(pinctrl_handler_seq);
-}
-
-/* Called with in the pinctrl_handler thread context. */
-static void
-notify_pinctrl_main(void)
-{
-    seq_change(pinctrl_main_seq);
-}
-
-/* pinctrl_handler pthread function. */
-static void *
-pinctrl_handler(void *arg_)
-{
-    struct pinctrl *pctrl = arg_;
-    /* OpenFlow connection to the switch. */
-    struct rconn *swconn;
-    /* Last seen sequence number for 'swconn'.  When this differs from
-     * rconn_get_connection_seqno(rconn), 'swconn' has reconnected. */
-    unsigned int conn_seq_no = 0;
-
-    char *br_int_name = NULL;
-    uint64_t new_seq;
-
-    /* Next IPV6 RA in seconds. */
-    static long long int send_ipv6_ra_time = LLONG_MAX;
-    /* Next GARP/RARP announcement in ms. */
-    static long long int send_garp_rarp_time = LLONG_MAX;
-    /* Next multicast query (IGMP) in ms. */
-    static long long int send_mcast_query_time = LLONG_MAX;
-
-    swconn = rconn_create(5, 0, DSCP_DEFAULT, 1 << OFP13_VERSION);
-
-    while (!latch_is_set(&pctrl->pinctrl_thread_exit)) {
-        if (pctrl->br_int_name) {
-            if (!br_int_name || strcmp(br_int_name, pctrl->br_int_name)) {
-                free(br_int_name);
-                br_int_name = xstrdup(pctrl->br_int_name);
-            }
-        }
-
-        if (br_int_name) {
-            char *target;
-
-            target = xasprintf("unix:%s/%s.mgmt", ovs_rundir(), br_int_name);
-            if (strcmp(target, rconn_get_target(swconn))) {
-                VLOG_INFO("%s: connecting to switch", target);
-                rconn_connect(swconn, target, target);
-            }
-            free(target);
-        } else {
-            rconn_disconnect(swconn);
-        }
-
-        ovs_mutex_lock(&pinctrl_mutex);
-        ip_mcast_snoop_run();
-        ovs_mutex_unlock(&pinctrl_mutex);
-
-        rconn_run(swconn);
-        if (rconn_is_connected(swconn)) {
-            if (conn_seq_no != rconn_get_connection_seqno(swconn)) {
-                pinctrl_setup(swconn);
-                conn_seq_no = rconn_get_connection_seqno(swconn);
-            }
-
-            for (int i = 0; i < 50; i++) {
-                struct ofpbuf *msg = rconn_recv(swconn);
-                if (!msg) {
-                    break;
-                }
-
-                const struct ofp_header *oh = msg->data;
-                enum ofptype type;
-
-                ofptype_decode(&type, oh);
-                pinctrl_recv(swconn, oh, type);
-                ofpbuf_delete(msg);
-            }
-
-            if (may_inject_pkts()) {
-                ovs_mutex_lock(&pinctrl_mutex);
-                send_garp_rarp_run(swconn, &send_garp_rarp_time);
-                send_ipv6_ras(swconn, &send_ipv6_ra_time);
-                send_mac_binding_buffered_pkts(swconn);
-                ovs_mutex_unlock(&pinctrl_mutex);
-
-                ip_mcast_querier_run(swconn, &send_mcast_query_time);
-            }
-        }
-
-        rconn_run_wait(swconn);
-        rconn_recv_wait(swconn);
-        send_garp_rarp_wait(send_garp_rarp_time);
-        ipv6_ra_wait(send_ipv6_ra_time);
-        ip_mcast_querier_wait(send_mcast_query_time);
-
-        new_seq = seq_read(pinctrl_handler_seq);
-        seq_wait(pinctrl_handler_seq, new_seq);
-
-        latch_wait(&pctrl->pinctrl_thread_exit);
-        poll_block();
-    }
-
-    free(br_int_name);
-    rconn_destroy(swconn);
-    return NULL;
-}
-
 /* Called by ovn-controller. */
 void
 pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
@@ -2145,17 +1876,45 @@  pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
             const struct hmap *local_datapaths,
             const struct sset *active_tunnels)
 {
-    ovs_mutex_lock(&pinctrl_mutex);
-    if (br_int && (!pinctrl.br_int_name || strcmp(pinctrl.br_int_name,
-                                                  br_int->name))) {
-        if (pinctrl.br_int_name) {
-            free(pinctrl.br_int_name);
+    char *target = xasprintf("unix:%s/%s.mgmt", ovs_rundir(), br_int->name);
+    if (strcmp(target, rconn_get_target(swconn))) {
+        VLOG_INFO("%s: connecting to switch", target);
+        rconn_connect(swconn, target, target);
+    }
+    free(target);
+
+    rconn_run(swconn);
+
+    if (!rconn_is_connected(swconn)) {
+        return;
+    }
+
+    if (conn_seq_no != rconn_get_connection_seqno(swconn)) {
+        pinctrl_setup();
+        conn_seq_no = rconn_get_connection_seqno(swconn);
+    }
+
+    for (int i = 0; i < 50; i++) {
+        struct ofpbuf *msg = rconn_recv(swconn);
+        if (!msg) {
+            break;
         }
-        pinctrl.br_int_name = xstrdup(br_int->name);
-        /* Notify pinctrl_handler that integration bridge is
-         * set/changed. */
-        notify_pinctrl_handler();
+
+        const struct ofp_header *oh = msg->data;
+        enum ofptype type;
+
+        ofptype_decode(&type, oh);
+        pinctrl_recv(oh, type);
+        ofpbuf_delete(msg);
     }
+
+    send_garp_rarp_prepare(sbrec_port_binding_by_datapath,
+                      sbrec_port_binding_by_name, br_int, chassis,
+                      local_datapaths, active_tunnels);
+
+    run_buffered_binding(sbrec_mac_binding_by_lport_ip,
+                         local_datapaths);
+
     run_put_mac_bindings(ovnsb_idl_txn, sbrec_datapath_binding_by_key,
                          sbrec_port_binding_by_key,
                          sbrec_mac_binding_by_lport_ip);
@@ -2164,6 +1923,7 @@  pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
     send_garp_rarp_prepare(sbrec_port_binding_by_datapath,
                            sbrec_port_binding_by_name, br_int, chassis,
                            local_datapaths, active_tunnels);
+
     prepare_ipv6_ras(local_datapaths);
     sync_dns_cache(dns_table);
     controller_event_run(ovnsb_idl_txn, ce_table, chassis);
@@ -2172,13 +1932,19 @@  pinctrl_run(struct ovsdb_idl_txn *ovnsb_idl_txn,
                   sbrec_port_binding_by_key,
                   sbrec_igmp_groups,
                   sbrec_ip_multicast_opts);
-    run_buffered_binding(sbrec_mac_binding_by_lport_ip,
-                         local_datapaths);
-    ovs_mutex_unlock(&pinctrl_mutex);
+
+    ip_mcast_snoop_run();
+
+    if (may_inject_pkts()) {
+        send_garp_rarp_run();
+        send_ipv6_ras();
+        send_mac_binding_buffered_pkts();
+
+        ip_mcast_querier_run();
+    }
 }
 
-/* Table of ipv6_ra_state structures, keyed on logical port name.
- * Protected by pinctrl_mutex. */
+/* Table of ipv6_ra_state structures, keyed on logical port name. */
 static struct shash ipv6_ras;
 
 struct ipv6_ra_config {
@@ -2319,9 +2085,8 @@  put_load(uint64_t value, enum mf_field_id dst, int ofs, int n_bits,
     bitwise_one(ofpact_set_field_mask(sf), sf->field->n_bytes, ofs, n_bits);
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static long long int
-ipv6_ra_send(struct rconn *swconn, struct ipv6_ra_state *ra)
+ipv6_ra_send(struct ipv6_ra_state *ra)
 {
     if (time_msec() < ra->next_announce) {
         return ra->next_announce;
@@ -2368,7 +2133,7 @@  ipv6_ra_send(struct rconn *swconn, struct ipv6_ra_state *ra)
     match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER);
     enum ofp_version version = rconn_get_version(swconn);
     enum ofputil_protocol proto = ofputil_protocol_from_ofp_version(version);
-    queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
+    queue_msg(ofputil_encode_packet_out(&po, proto));
     dp_packet_uninit(&packet);
     ofpbuf_uninit(&ofpacts);
 
@@ -2378,9 +2143,8 @@  ipv6_ra_send(struct rconn *swconn, struct ipv6_ra_state *ra)
     return ra->next_announce;
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-ipv6_ra_wait(long long int send_ipv6_ra_time)
+ipv6_ra_wait(void)
 {
     /* Set the poll timer for next IPv6 RA only if IPv6 RAs needs to
      * be sent. */
@@ -2389,27 +2153,22 @@  ipv6_ra_wait(long long int send_ipv6_ra_time)
     }
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-send_ipv6_ras(struct rconn *swconn, long long int *send_ipv6_ra_time)
-    OVS_REQUIRES(pinctrl_mutex)
+send_ipv6_ras(void)
 {
-    *send_ipv6_ra_time = LLONG_MAX;
+    send_ipv6_ra_time = LLONG_MAX;
     struct shash_node *iter;
     SHASH_FOR_EACH (iter, &ipv6_ras) {
         struct ipv6_ra_state *ra = iter->data;
-        long long int next_ra = ipv6_ra_send(swconn, ra);
-        if (*send_ipv6_ra_time > next_ra) {
-            *send_ipv6_ra_time = next_ra;
+        long long int next_ra = ipv6_ra_send(ra);
+        if (send_ipv6_ra_time > next_ra) {
+            send_ipv6_ra_time = next_ra;
         }
     }
 }
 
-/* Called by pinctrl_run(). Runs with in the main ovn-controller
- * thread context. */
 static void
 prepare_ipv6_ras(const struct hmap *local_datapaths)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     struct shash_node *iter, *iter_next;
 
@@ -2418,7 +2177,6 @@  prepare_ipv6_ras(const struct hmap *local_datapaths)
         ra->delete_me = true;
     }
 
-    bool changed = false;
     const struct local_datapath *ld;
     HMAP_FOR_EACH (ld, hmap_node, local_datapaths) {
 
@@ -2444,7 +2202,6 @@  prepare_ipv6_ras(const struct hmap *local_datapaths)
                     ra->config->min_interval,
                     ra->config->max_interval);
                 shash_add(&ipv6_ras, pb->logical_port, ra);
-                changed = true;
             } else {
                 if (config->min_interval != ra->config->min_interval ||
                     config->max_interval != ra->config->max_interval)
@@ -2462,8 +2219,6 @@  prepare_ipv6_ras(const struct hmap *local_datapaths)
             ra->port_key = peer->tunnel_key;
             ra->metadata = peer->datapath->tunnel_key;
             ra->delete_me = false;
-
-            /* pinctrl_handler thread will send the IPv6 RAs. */
         }
     }
 
@@ -2475,33 +2230,26 @@  prepare_ipv6_ras(const struct hmap *local_datapaths)
             ipv6_ra_delete(ra);
         }
     }
-
-    if (changed) {
-        notify_pinctrl_handler();
-    }
-
 }
 
-/* Called by pinctrl_run(). Runs with in the main ovn-controller
- * thread context. */
 void
 pinctrl_wait(struct ovsdb_idl_txn *ovnsb_idl_txn)
 {
     wait_put_mac_bindings(ovnsb_idl_txn);
     wait_controller_event(ovnsb_idl_txn);
     wait_put_vport_bindings(ovnsb_idl_txn);
-    int64_t new_seq = seq_read(pinctrl_main_seq);
-    seq_wait(pinctrl_main_seq, new_seq);
+    rconn_run_wait(swconn);
+    rconn_recv_wait(swconn);
+    send_garp_rarp_wait();
+    ipv6_ra_wait();
+    ip_mcast_querier_wait();
 }
 
 /* Called by ovn-controller. */
 void
 pinctrl_destroy(void)
 {
-    latch_set(&pinctrl.pinctrl_thread_exit);
-    pthread_join(pinctrl.pinctrl_thread, NULL);
-    latch_destroy(&pinctrl.pinctrl_thread_exit);
-    free(pinctrl.br_int_name);
+    rconn_destroy(swconn);
     destroy_send_garps_rarps();
     destroy_ipv6_ras();
     destroy_buffered_packets_map();
@@ -2510,8 +2258,6 @@  pinctrl_destroy(void)
     destroy_put_vport_bindings();
     destroy_dns_cache();
     ip_mcast_snoop_destroy();
-    seq_destroy(pinctrl_main_seq);
-    seq_destroy(pinctrl_handler_seq);
 }
 
 /* Implementation of the "put_arp" and "put_nd" OVN actions.  These
@@ -2569,12 +2315,10 @@  pinctrl_find_put_mac_binding(uint32_t dp_key, uint32_t port_key,
     return NULL;
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
 pinctrl_handle_put_mac_binding(const struct flow *md,
                                const struct flow *headers,
                                bool is_arp)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     uint32_t dp_key = ntohll(md->metadata);
     uint32_t port_key = md->regs[MFF_LOG_INPORT - MFF_REG0];
@@ -2603,21 +2347,14 @@  pinctrl_handle_put_mac_binding(const struct flow *md,
         pmb->ip_key = ip_key;
     }
     pmb->mac = headers->dl_src;
-
-    /* We can send the buffered packet once the main ovn-controller
-     * thread calls pinctrl_run() and it writes the mac_bindings stored
-     * in 'put_mac_bindings' hmap into the Southbound MAC_Binding table. */
-    notify_pinctrl_main();
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-send_mac_binding_buffered_pkts(struct rconn *swconn)
-    OVS_REQUIRES(pinctrl_mutex)
+send_mac_binding_buffered_pkts(void)
 {
     struct buffered_packets *bp;
     LIST_FOR_EACH_POP (bp, list, &buffered_mac_bindings) {
-        buffered_send_packets(swconn, bp, &bp->ea);
+        buffered_send_packets(bp, &bp->ea);
         free(bp);
     }
     ovs_list_init(&buffered_mac_bindings);
@@ -2685,14 +2422,11 @@  run_put_mac_binding(struct ovsdb_idl_txn *ovnsb_idl_txn,
     ds_destroy(&ip_s);
 }
 
-/* Called by pinctrl_run(). Runs with in the main ovn-controller
- * thread context. */
 static void
 run_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn,
                      struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
                      struct ovsdb_idl_index *sbrec_port_binding_by_key,
                      struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     if (!ovnsb_idl_txn) {
         return;
@@ -2711,10 +2445,8 @@  run_put_mac_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn,
 static void
 run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
                      const struct hmap *local_datapaths)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     const struct local_datapath *ld;
-    bool notify = false;
 
     HMAP_FOR_EACH (ld, hmap_node, local_datapaths) {
 
@@ -2733,17 +2465,12 @@  run_buffered_binding(struct ovsdb_idl_index *sbrec_mac_binding_by_lport_ip,
                                   ETH_ADDR_SCAN_ARGS(cur_qp->ea))) {
                     hmap_remove(&buffered_packets_map, &cur_qp->hmap_node);
                     ovs_list_push_back(&buffered_mac_bindings, &cur_qp->list);
-                    notify = true;
                 }
                 ds_destroy(&ip_s);
             }
         }
     }
     buffered_packets_map_gc();
-
-    if (notify) {
-        notify_pinctrl_handler();
-    }
 }
 
 static void
@@ -2795,7 +2522,6 @@  destroy_send_garps_rarps(void)
     shash_destroy_free_data(&send_garp_rarp_data);
 }
 
-/* Runs with in the main ovn-controller thread context. */
 static void
 add_garp_rarp(const char *name, const struct eth_addr ea, ovs_be32 ip,
               uint32_t dp_key, uint32_t port_key)
@@ -2808,10 +2534,6 @@  add_garp_rarp(const char *name, const struct eth_addr ea, ovs_be32 ip,
     garp_rarp->dp_key = dp_key;
     garp_rarp->port_key = port_key;
     shash_add(&send_garp_rarp_data, name, garp_rarp);
-
-    /* Notify pinctrl_handler so that it can wakeup and process
-     * these GARP/RARP requests. */
-    notify_pinctrl_handler();
 }
 
 /* Add or update a vif for which GARPs need to be announced. */
@@ -2890,14 +2612,11 @@  send_garp_rarp_delete(const char *lport)
     struct garp_rarp_data *garp_rarp = shash_find_and_delete
                                        (&send_garp_rarp_data, lport);
     free(garp_rarp);
-    notify_pinctrl_handler();
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static long long int
-send_garp_rarp(struct rconn *swconn, struct garp_rarp_data *garp_rarp,
+send_garp_rarp(struct garp_rarp_data *garp_rarp,
                long long int current_time)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     if (current_time < garp_rarp->announce_time) {
         return garp_rarp->announce_time;
@@ -2933,7 +2652,7 @@  send_garp_rarp(struct rconn *swconn, struct garp_rarp_data *garp_rarp,
     };
     match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER);
     enum ofputil_protocol proto = ofputil_protocol_from_ofp_version(version);
-    queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
+    queue_msg(ofputil_encode_packet_out(&po, proto));
     dp_packet_uninit(&packet);
     ofpbuf_uninit(&ofpacts);
 
@@ -2968,8 +2687,7 @@  struct ip_mcast_snoop_cfg {
 };
 
 /*
- * Holds per-datapath information about multicast snooping. Maintained by
- * pinctrl_handler().
+ * Holds per-datapath information about multicast snooping.
  */
 struct ip_mcast_snoop {
     struct hmap_node hmap_node;    /* Linkage in the hash map. */
@@ -2994,24 +2712,14 @@  struct ip_mcast_snoop_state {
 /* Only default vlan supported for now. */
 #define IP_MCAST_VLAN 1
 
-/* Multicast snooping information stored independently by datapath key.
- * Protected by pinctrl_mutex. pinctrl_handler has RW access and pinctrl_main
- * has RO access.
- */
-static struct hmap mcast_snoop_map OVS_GUARDED_BY(pinctrl_mutex);
+/* Multicast snooping information stored independently by datapath key. */
+static struct hmap mcast_snoop_map;
 
-/* Contains multicast queries to be sent. Only used by pinctrl_handler so no
- * locking needed.
- */
+/* Contains multicast queries to be sent. */
 static struct ovs_list mcast_query_list;
 
-/* Multicast config information stored independently by datapath key.
- * Protected by pinctrl_mutex. pinctrl_handler has RO access and pinctrl_main
- * has RW access. Read accesses from pinctrl_ip_mcast_handle_igmp() can be
- * performed without taking the lock as they are executed in the pinctrl_main
- * thread.
- */
-static struct hmap mcast_cfg_map OVS_GUARDED_BY(pinctrl_mutex);
+/* Multicast config information stored independently by datapath key. */
+static struct hmap mcast_cfg_map;
 
 static void
 ip_mcast_snoop_cfg_load(struct ip_mcast_snoop_cfg *cfg,
@@ -3089,7 +2797,6 @@  ip_mcast_snoop_hash(int64_t dp_key)
 
 static struct ip_mcast_snoop_state *
 ip_mcast_snoop_state_add(int64_t dp_key)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     struct ip_mcast_snoop_state *ms_state = xmalloc(sizeof *ms_state);
 
@@ -3101,7 +2808,6 @@  ip_mcast_snoop_state_add(int64_t dp_key)
 
 static struct ip_mcast_snoop_state *
 ip_mcast_snoop_state_find(int64_t dp_key)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     struct ip_mcast_snoop_state *ms_state;
     uint32_t hash = ip_mcast_snoop_hash(dp_key);
@@ -3117,15 +2823,16 @@  ip_mcast_snoop_state_find(int64_t dp_key)
 static bool
 ip_mcast_snoop_state_update(int64_t dp_key,
                             const struct ip_mcast_snoop_cfg *cfg)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     bool notify = false;
     struct ip_mcast_snoop_state *ms_state = ip_mcast_snoop_state_find(dp_key);
 
     if (!ms_state) {
+        VLOG_INFO("Adding snoop state");
         ms_state = ip_mcast_snoop_state_add(dp_key);
         notify = true;
     } else if (memcmp(cfg, &ms_state->cfg, sizeof *cfg)) {
+        VLOG_INFO("Not adding snoop state but it's different...");
         notify = true;
     }
 
@@ -3135,7 +2842,6 @@  ip_mcast_snoop_state_update(int64_t dp_key,
 
 static void
 ip_mcast_snoop_state_remove(struct ip_mcast_snoop_state *ms_state)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     hmap_remove(&mcast_cfg_map, &ms_state->hmap_node);
     free(ms_state);
@@ -3195,7 +2901,6 @@  ip_mcast_snoop_configure(struct ip_mcast_snoop *ip_ms,
         goto set_fields;
     }
 
-    ovs_rwlock_wrlock(&ip_ms->ms->rwlock);
     if (cfg->table_size != ip_ms->cfg.table_size) {
         mcast_snooping_set_max_entries(ip_ms->ms, cfg->table_size);
     }
@@ -3203,7 +2908,6 @@  ip_mcast_snoop_configure(struct ip_mcast_snoop *ip_ms,
     if (cfg->idle_time_s != ip_ms->cfg.idle_time_s) {
         mcast_snooping_set_idle_time(ip_ms->ms, cfg->idle_time_s);
     }
-    ovs_rwlock_unlock(&ip_ms->ms->rwlock);
 
     if (cfg->query_interval_s != ip_ms->cfg.query_interval_s) {
         long long int now = time_msec();
@@ -3220,7 +2924,6 @@  set_fields:
 
 static struct ip_mcast_snoop *
 ip_mcast_snoop_add(int64_t dp_key, const struct ip_mcast_snoop_cfg *cfg)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     struct ip_mcast_snoop *ip_ms = xzalloc(sizeof *ip_ms);
 
@@ -3237,7 +2940,6 @@  ip_mcast_snoop_add(int64_t dp_key, const struct ip_mcast_snoop_cfg *cfg)
 
 static struct ip_mcast_snoop *
 ip_mcast_snoop_find(int64_t dp_key)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     struct ip_mcast_snoop *ip_ms;
 
@@ -3252,7 +2954,6 @@  ip_mcast_snoop_find(int64_t dp_key)
 
 static void
 ip_mcast_snoop_remove(struct ip_mcast_snoop *ip_ms)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     hmap_remove(&mcast_snoop_map, &ip_ms->hmap_node);
 
@@ -3266,7 +2967,6 @@  ip_mcast_snoop_remove(struct ip_mcast_snoop *ip_ms)
 
 static void
 ip_mcast_snoop_init(void)
-    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     hmap_init(&mcast_snoop_map);
     ovs_list_init(&mcast_query_list);
@@ -3275,7 +2975,6 @@  ip_mcast_snoop_init(void)
 
 static void
 ip_mcast_snoop_destroy(void)
-    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     struct ip_mcast_snoop *ip_ms, *ip_ms_next;
 
@@ -3293,11 +2992,10 @@  ip_mcast_snoop_destroy(void)
 
 static void
 ip_mcast_snoop_run(void)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     struct ip_mcast_snoop *ip_ms, *ip_ms_next;
 
-    /* First read the config updated by pinctrl_main. If there's any new or
+    /* First read the config. If there's any new or
      * updated config then apply it.
      */
     struct ip_mcast_snoop_state *ip_ms_state;
@@ -3306,15 +3004,15 @@  ip_mcast_snoop_run(void)
         ip_ms = ip_mcast_snoop_find(ip_ms_state->dp_key);
 
         if (!ip_ms) {
+            VLOG_INFO("Adding ip mcast snoop (ip mcast snoop run)");
             ip_mcast_snoop_add(ip_ms_state->dp_key, &ip_ms_state->cfg);
         } else if (memcmp(&ip_ms_state->cfg, &ip_ms->cfg,
                           sizeof ip_ms_state->cfg)) {
+            VLOG_INFO("Configuring ip mcast snoop (ip mcast snoop run)");
             ip_mcast_snoop_configure(ip_ms, &ip_ms_state->cfg);
         }
     }
 
-    bool notify = false;
-
     /* Then walk the multicast snoop instances. */
     HMAP_FOR_EACH_SAFE (ip_ms, ip_ms_next, hmap_node, &mcast_snoop_map) {
 
@@ -3326,22 +3024,14 @@  ip_mcast_snoop_run(void)
 
         /* If enabled run the snooping instance to timeout old groups. */
         if (ip_ms->cfg.enabled) {
-            if (mcast_snooping_run(ip_ms->ms)) {
-                notify = true;
-            }
-
+            mcast_snooping_run(ip_ms->ms);
             mcast_snooping_wait(ip_ms->ms);
         }
     }
-
-    if (notify) {
-        notify_pinctrl_main();
-    }
 }
 
 /*
- * This runs in the pinctrl main thread, so it has access to the southbound
- * database. It reads the IP_Multicast table and updates the local multicast
+ * This reads the IP_Multicast table and updates the local multicast
  * configuration. Then writes to the southbound database the updated
  * IGMP_Groups.
  */
@@ -3353,10 +3043,7 @@  ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
               struct ovsdb_idl_index *sbrec_port_binding_by_key,
               struct ovsdb_idl_index *sbrec_igmp_groups,
               struct ovsdb_idl_index *sbrec_ip_multicast)
-    OVS_REQUIRES(pinctrl_mutex)
 {
-    bool notify = false;
-
     if (!ovnsb_idl_txn || !chassis) {
         return;
     }
@@ -3373,17 +3060,16 @@  ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
         struct ip_mcast_snoop_cfg cfg;
 
         ip_mcast_snoop_cfg_load(&cfg, ip_mcast);
-        if (ip_mcast_snoop_state_update(dp_key, &cfg)) {
-            notify = true;
-        }
+        VLOG_INFO("Are we calling ip_mcast_snoop_update?");
+        ip_mcast_snoop_state_update(dp_key, &cfg);
     }
 
     /* Then delete the old entries. */
     HMAP_FOR_EACH_SAFE (ip_ms_state, ip_ms_state_next, hmap_node,
                         &mcast_cfg_map) {
         if (!get_local_datapath(local_datapaths, ip_ms_state->dp_key)) {
+            VLOG_INFO("Removing ip mcast snoop state?");
             ip_mcast_snoop_state_remove(ip_ms_state);
-            notify = true;
         }
     }
 
@@ -3415,7 +3101,6 @@  ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
             continue;
         }
 
-        ovs_rwlock_rdlock(&ip_ms->ms->rwlock);
         struct mcast_group *mc_group =
             mcast_snooping_lookup4(ip_ms->ms, group_addr,
                                    IP_MCAST_VLAN);
@@ -3423,7 +3108,6 @@  ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
         if (!mc_group || ovs_list_is_empty(&mc_group->bundle_lru)) {
             igmp_group_delete(sbrec_igmp);
         }
-        ovs_rwlock_unlock(&ip_ms->ms->rwlock);
     }
 
     struct ip_mcast_snoop *ip_ms, *ip_ms_next;
@@ -3448,7 +3132,6 @@  ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
 
         struct mcast_group *mc_group;
 
-        ovs_rwlock_rdlock(&ip_ms->ms->rwlock);
         LIST_FOR_EACH (mc_group, group_node, &ip_ms->ms->group_lru) {
             if (ovs_list_is_empty(&mc_group->bundle_lru)) {
                 continue;
@@ -3464,21 +3147,14 @@  ip_mcast_sync(struct ovsdb_idl_txn *ovnsb_idl_txn,
                                     sbrec_port_binding_by_key, ip_ms->ms,
                                     mc_group);
         }
-        ovs_rwlock_unlock(&ip_ms->ms->rwlock);
-    }
-
-    if (notify) {
-        notify_pinctrl_handler();
     }
 }
 
 static void
-pinctrl_ip_mcast_handle_igmp(struct rconn *swconn OVS_UNUSED,
-                             const struct flow *ip_flow,
+pinctrl_ip_mcast_handle_igmp(const struct flow *ip_flow,
                              struct dp_packet *pkt_in,
                              const struct match *md,
                              struct ofpbuf *userdata OVS_UNUSED)
-    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
@@ -3515,45 +3191,33 @@  pinctrl_ip_mcast_handle_igmp(struct rconn *swconn OVS_UNUSED,
 
     void *port_key_data = (void *)(uintptr_t)port_key;
 
-    bool group_change = false;
-
-    ovs_rwlock_wrlock(&ip_ms->ms->rwlock);
     switch (ntohs(ip_flow->tp_src)) {
      /* Only default VLAN is supported for now. */
     case IGMP_HOST_MEMBERSHIP_REPORT:
     case IGMPV2_HOST_MEMBERSHIP_REPORT:
-        group_change =
-            mcast_snooping_add_group4(ip_ms->ms, ip4, IP_MCAST_VLAN,
-                                      port_key_data);
+        mcast_snooping_add_group4(ip_ms->ms, ip4, IP_MCAST_VLAN,
+                                  port_key_data);
         break;
     case IGMP_HOST_LEAVE_MESSAGE:
-        group_change =
-            mcast_snooping_leave_group4(ip_ms->ms, ip4, IP_MCAST_VLAN,
-                                        port_key_data);
+        mcast_snooping_leave_group4(ip_ms->ms, ip4, IP_MCAST_VLAN,
+                                    port_key_data);
         break;
     case IGMP_HOST_MEMBERSHIP_QUERY:
         /* Shouldn't be receiving any of these since we are the multicast
          * router. Store them for now.
          */
-        group_change =
-            mcast_snooping_add_mrouter(ip_ms->ms, IP_MCAST_VLAN,
-                                       port_key_data);
+        mcast_snooping_add_mrouter(ip_ms->ms, IP_MCAST_VLAN,
+                                   port_key_data);
         break;
     case IGMPV3_HOST_MEMBERSHIP_REPORT:
-        group_change =
-            mcast_snooping_add_report(ip_ms->ms, pkt_in, IP_MCAST_VLAN,
-                                      port_key_data);
+        mcast_snooping_add_report(ip_ms->ms, pkt_in, IP_MCAST_VLAN,
+                                  port_key_data);
         break;
     }
-    ovs_rwlock_unlock(&ip_ms->ms->rwlock);
-
-    if (group_change) {
-        notify_pinctrl_main();
-    }
 }
 
 static long long int
-ip_mcast_querier_send(struct rconn *swconn, struct ip_mcast_snoop *ip_ms,
+ip_mcast_querier_send(struct ip_mcast_snoop *ip_ms,
                       long long int current_time)
 {
     if (current_time < ip_ms->query_time_ms) {
@@ -3621,7 +3285,7 @@  ip_mcast_querier_send(struct rconn *swconn, struct ip_mcast_snoop *ip_ms,
     };
     match_set_in_port(&po.flow_metadata, OFPP_CONTROLLER);
     enum ofputil_protocol proto = ofputil_protocol_from_ofp_version(version);
-    queue_msg(swconn, ofputil_encode_packet_out(&po, proto));
+    queue_msg(ofputil_encode_packet_out(&po, proto));
     dp_packet_uninit(&packet);
     ofpbuf_uninit(&ofpacts);
 
@@ -3631,32 +3295,36 @@  ip_mcast_querier_send(struct rconn *swconn, struct ip_mcast_snoop *ip_ms,
 }
 
 static void
-ip_mcast_querier_run(struct rconn *swconn, long long int *query_time)
+ip_mcast_querier_run(void)
 {
+    VLOG_INFO("Running querier");
     if (ovs_list_is_empty(&mcast_query_list)) {
         return;
     }
 
     /* Send multicast queries and update the next query time. */
     long long int current_time = time_msec();
-    *query_time = LLONG_MAX;
+    send_mcast_query_time = LLONG_MAX;
 
     struct ip_mcast_snoop *ip_ms;
 
     LIST_FOR_EACH (ip_ms, query_node, &mcast_query_list) {
         long long int next_query_time =
-            ip_mcast_querier_send(swconn, ip_ms, current_time);
-        if (*query_time > next_query_time) {
-            *query_time = next_query_time;
+            ip_mcast_querier_send(ip_ms, current_time);
+        if (send_mcast_query_time > next_query_time) {
+            VLOG_INFO("Updated query time from %lld to %lld", send_mcast_query_time, next_query_time);
+            send_mcast_query_time = next_query_time;
         }
     }
 }
 
 static void
-ip_mcast_querier_wait(long long int query_time)
+ip_mcast_querier_wait(void)
 {
+    VLOG_INFO("Running querier wait");
     if (!ovs_list_is_empty(&mcast_query_list)) {
-        poll_timer_wait_until(query_time);
+        VLOG_INFO("Waiting until %lld", send_mcast_query_time);
+        poll_timer_wait_until(send_mcast_query_time);
     }
 }
 
@@ -3882,7 +3550,7 @@  get_nat_addresses_and_keys(struct ovsdb_idl_index *sbrec_port_binding_by_name,
 }
 
 static void
-send_garp_rarp_wait(long long int send_garp_rarp_time)
+send_garp_rarp_wait(void)
 {
     /* Set the poll timer for next garp/rarp only if there is data to
      * be sent. */
@@ -3891,10 +3559,8 @@  send_garp_rarp_wait(long long int send_garp_rarp_time)
     }
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-send_garp_rarp_run(struct rconn *swconn, long long int *send_garp_rarp_time)
-    OVS_REQUIRES(pinctrl_mutex)
+send_garp_rarp_run(void)
 {
     if (shash_is_empty(&send_garp_rarp_data)) {
         return;
@@ -3903,18 +3569,16 @@  send_garp_rarp_run(struct rconn *swconn, long long int *send_garp_rarp_time)
     /* Send GARPs, and update the next announcement. */
     struct shash_node *iter;
     long long int current_time = time_msec();
-    *send_garp_rarp_time = LLONG_MAX;
+    send_garp_rarp_time = LLONG_MAX;
     SHASH_FOR_EACH (iter, &send_garp_rarp_data) {
-        long long int next_announce = send_garp_rarp(swconn, iter->data,
+        long long int next_announce = send_garp_rarp(iter->data,
                                                      current_time);
-        if (*send_garp_rarp_time > next_announce) {
-            *send_garp_rarp_time = next_announce;
+        if (send_garp_rarp_time > next_announce) {
+            send_garp_rarp_time = next_announce;
         }
     }
 }
 
-/* Called by pinctrl_run(). Runs with in the main ovn-controller
- * thread context. */
 static void
 send_garp_rarp_prepare(struct ovsdb_idl_index *sbrec_port_binding_by_datapath,
                        struct ovsdb_idl_index *sbrec_port_binding_by_name,
@@ -3922,7 +3586,6 @@  send_garp_rarp_prepare(struct ovsdb_idl_index *sbrec_port_binding_by_datapath,
                        const struct sbrec_chassis *chassis,
                        const struct hmap *local_datapaths,
                        const struct sset *active_tunnels)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     struct sset localnet_vifs = SSET_INITIALIZER(&localnet_vifs);
     struct sset local_l3gw_ports = SSET_INITIALIZER(&local_l3gw_ports);
@@ -3970,8 +3633,6 @@  send_garp_rarp_prepare(struct ovsdb_idl_index *sbrec_port_binding_by_datapath,
         }
     }
 
-    /* pinctrl_handler thread will send the GARPs. */
-
     sset_destroy(&localnet_vifs);
     sset_destroy(&local_l3gw_ports);
 
@@ -4031,9 +3692,8 @@  reload_metadata(struct ofpbuf *ofpacts, const struct match *md)
     }
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-pinctrl_handle_nd_na(struct rconn *swconn, const struct flow *ip_flow,
+pinctrl_handle_nd_na(const struct flow *ip_flow,
                      const struct match *md,
                      struct ofpbuf *userdata, bool is_router)
 {
@@ -4060,13 +3720,12 @@  pinctrl_handle_nd_na(struct rconn *swconn, const struct flow *ip_flow,
                   htonl(rso_flags));
 
     /* Reload previous packet metadata and set actions from userdata. */
-    set_actions_and_enqueue_msg(swconn, &packet, md, userdata);
+    set_actions_and_enqueue_msg(&packet, md, userdata);
     dp_packet_uninit(&packet);
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-pinctrl_handle_nd_ns(struct rconn *swconn, const struct flow *ip_flow,
+pinctrl_handle_nd_ns(const struct flow *ip_flow,
                      struct dp_packet *pkt_in,
                      const struct match *md, struct ofpbuf *userdata)
 {
@@ -4077,9 +3736,7 @@  pinctrl_handle_nd_ns(struct rconn *swconn, const struct flow *ip_flow,
         return;
     }
 
-    ovs_mutex_lock(&pinctrl_mutex);
     pinctrl_handle_buffered_packets(ip_flow, pkt_in, md, false);
-    ovs_mutex_unlock(&pinctrl_mutex);
 
     uint64_t packet_stub[128 / 8];
     struct dp_packet packet;
@@ -4089,14 +3746,12 @@  pinctrl_handle_nd_ns(struct rconn *swconn, const struct flow *ip_flow,
                   &ip_flow->ipv6_dst);
 
     /* Reload previous packet metadata and set actions from userdata. */
-    set_actions_and_enqueue_msg(swconn, &packet, md, userdata);
+    set_actions_and_enqueue_msg(&packet, md, userdata);
     dp_packet_uninit(&packet);
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
 pinctrl_handle_put_nd_ra_opts(
-    struct rconn *swconn,
     const struct flow *in_flow, struct dp_packet *pkt_in,
     struct ofputil_packet_in *pin, struct ofpbuf *userdata,
     struct ofpbuf *continuation)
@@ -4178,14 +3833,12 @@  exit:
         sv.u8_val = success;
         mf_write_subfield(&dst, &sv, &pin->flow_metadata);
     }
-    queue_msg(swconn, ofputil_encode_resume(pin, continuation, proto));
+    queue_msg(ofputil_encode_resume(pin, continuation, proto));
     dp_packet_uninit(pkt_out_ptr);
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
-pinctrl_handle_put_icmp4_frag_mtu(struct rconn *swconn,
-                                  const struct flow *in_flow,
+pinctrl_handle_put_icmp4_frag_mtu(const struct flow *in_flow,
                                   struct dp_packet *pkt_in,
                                   struct ofputil_packet_in *pin,
                                   struct ofpbuf *userdata,
@@ -4225,7 +3878,7 @@  pinctrl_handle_put_icmp4_frag_mtu(struct rconn *swconn,
     pin->packet_len = dp_packet_size(pkt_out);
 
 exit:
-    queue_msg(swconn, ofputil_encode_resume(pin, continuation, proto));
+    queue_msg(ofputil_encode_resume(pin, continuation, proto));
     if (pkt_out) {
         dp_packet_delete(pkt_out);
     }
@@ -4303,7 +3956,6 @@  pinctrl_handle_empty_lb_backends_opts(struct ofpbuf *userdata)
         event->protocol = protocol;
         event->load_balancer = load_balancer;
         event->timestamp = time_msec();
-        notify_pinctrl_main();
     } else {
         free(vip);
         free(protocol);
@@ -4314,7 +3966,6 @@  pinctrl_handle_empty_lb_backends_opts(struct ofpbuf *userdata)
 
 static void
 pinctrl_handle_event(struct ofpbuf *userdata)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     ovs_be32 *pevent;
 
@@ -4424,14 +4075,11 @@  run_put_vport_binding(struct ovsdb_idl_txn *ovnsb_idl_txn OVS_UNUSED,
     }
 }
 
-/* Called by pinctrl_run(). Runs with in the main ovn-controller
- * thread context. */
 static void
 run_put_vport_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn,
                       struct ovsdb_idl_index *sbrec_datapath_binding_by_key,
                       struct ovsdb_idl_index *sbrec_port_binding_by_key,
                       const struct sbrec_chassis *chassis)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     if (!ovnsb_idl_txn) {
         return;
@@ -4446,11 +4094,9 @@  run_put_vport_bindings(struct ovsdb_idl_txn *ovnsb_idl_txn,
     flush_put_vport_bindings();
 }
 
-/* Called with in the pinctrl_handler thread context. */
 static void
 pinctrl_handle_bind_vport(
     const struct flow *md, struct ofpbuf *userdata)
-    OVS_REQUIRES(pinctrl_mutex)
 {
     /* Get the datapath key from the packet metadata. */
     uint32_t dp_key = ntohll(md->metadata);
@@ -4481,6 +4127,4 @@  pinctrl_handle_bind_vport(
     vpb->dp_key = dp_key;
     vpb->vport_key = vport_key;
     vpb->vport_parent_key = vport_parent_key;
-
-    notify_pinctrl_main();
 }