diff mbox series

[ovs-dev,13/15] Clone packets to both port chassis

Message ID 20220215035737.1820679-14-ihrachys@redhat.com
State Superseded, archived
Headers show
Series Support additional-chassis for ports | expand

Checks

Context Check Description
ovsrobot/apply-robot success apply and check: success

Commit Message

Ihar Hrachyshka Feb. 15, 2022, 3:57 a.m. UTC
When requested-additional-chassis is set, port binding is configured
in two cluster locations. In case of a live migration scenario, only one
of the locations runs a workload at a particular point in time. Yet,
it's expected that the workload may switch to running at the
additional-chassis at any moment during live migration (depends on
libvirt / qemu migration progress). To speed up the switch to near
instant, do the following:

When a port sends a packet to another port that has two
chassis then, in addition to sending the packet to the main chassis,
also send it to the additional chassis. When the sending port is bound
on either the main or additional chassis, then handle the packet
locally plus send it to the other chassis.

This is achieved with additional flows in tables 37 and 38.

Signed-off-by: Ihar Hrachyshka <ihrachys@redhat.com>
---
 controller/physical.c | 180 +++++++++++++++++++----
 tests/ovn.at          | 335 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 486 insertions(+), 29 deletions(-)
diff mbox series

Patch

diff --git a/controller/physical.c b/controller/physical.c
index bc4f7c4f0..8aa94d850 100644
--- a/controller/physical.c
+++ b/controller/physical.c
@@ -287,12 +287,13 @@  match_outport_dp_and_port_keys(struct match *match,
 }
 
 static void
-put_remote_port_redirect_overlay(const struct
-                                 sbrec_port_binding *binding,
+put_remote_port_redirect_overlay(const struct sbrec_port_binding *binding,
                                  bool is_ha_remote,
                                  struct ha_chassis_ordered *ha_ch_ordered,
                                  enum mf_field_id mff_ovn_geneve,
                                  const struct chassis_tunnel *tun,
+                                 const struct chassis_tunnel *additional_tun,
+                                 uint32_t dp_key,
                                  uint32_t port_key,
                                  struct match *match,
                                  struct ofpbuf *ofpacts_p,
@@ -301,14 +302,51 @@  put_remote_port_redirect_overlay(const struct
 {
     if (!is_ha_remote) {
         /* Setup encapsulation */
-        if (!tun) {
-            return;
+        bool is_vtep = !strcmp(binding->type, "vtep");
+        if (!additional_tun) {
+            /* Output to main chassis tunnel. */
+            put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
+                              is_vtep, ofpacts_p);
+            ofpact_put_OUTPUT(ofpacts_p)->port = tun->ofport;
+
+            ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
+                            binding->header_.uuid.parts[0],
+                            match, ofpacts_p, &binding->header_.uuid);
+        } else {
+            /* For packets arriving from tunnels, don't clone to avoid sending
+             * packets received from another chassis back to it. */
+            match_outport_dp_and_port_keys(match, dp_key, port_key);
+            match_set_reg_masked(match, MFF_LOG_FLAGS - MFF_REG0,
+                                 MLF_LOCAL_ONLY, MLF_LOCAL_ONLY);
+
+            /* Output to main chassis tunnel. */
+            put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
+                              is_vtep, ofpacts_p);
+            ofpact_put_OUTPUT(ofpacts_p)->port = tun->ofport;
+
+            ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 110,
+                            binding->header_.uuid.parts[0], match, ofpacts_p,
+                            &binding->header_.uuid);
+
+            /* For packets originating from this chassis, clone in addition to
+             * handling it locally. */
+            match_outport_dp_and_port_keys(match, dp_key, port_key);
+            ofpbuf_clear(ofpacts_p);
+
+            /* Output to main chassis tunnel. */
+            put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
+                              is_vtep, ofpacts_p);
+            ofpact_put_OUTPUT(ofpacts_p)->port = tun->ofport;
+
+            /* Output to additional chassis tunnel. */
+            put_encapsulation(mff_ovn_geneve, additional_tun,
+                              binding->datapath, port_key, is_vtep, ofpacts_p);
+            ofpact_put_OUTPUT(ofpacts_p)->port = additional_tun->ofport;
+
+            ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
+                            binding->header_.uuid.parts[0], match, ofpacts_p,
+                            &binding->header_.uuid);
         }
-        put_encapsulation(mff_ovn_geneve, tun, binding->datapath, port_key,
-                          !strcmp(binding->type, "vtep"),
-                          ofpacts_p);
-        /* Output to tunnel. */
-        ofpact_put_OUTPUT(ofpacts_p)->port = tun->ofport;
     } else {
         /* Make sure all tunnel endpoints use the same encapsulation,
          * and set it up */
@@ -376,10 +414,11 @@  put_remote_port_redirect_overlay(const struct
         bundle->basis = 0;
         bundle->fields = NX_HASH_FIELDS_ETH_SRC;
         ofpact_finish_BUNDLE(ofpacts_p, &bundle);
+
+        ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
+                        binding->header_.uuid.parts[0],
+                        match, ofpacts_p, &binding->header_.uuid);
     }
-    ofctrl_add_flow(flow_table, OFTABLE_REMOTE_OUTPUT, 100,
-                    binding->header_.uuid.parts[0],
-                    match, ofpacts_p, &binding->header_.uuid);
 }
 
 
@@ -728,6 +767,8 @@  put_local_common_flows(uint32_t dp_key,
                        const struct sbrec_port_binding *pb,
                        const struct sbrec_port_binding *parent_pb,
                        const struct zone_ids *zone_ids,
+                       const struct chassis_tunnel *additional_tun,
+                       enum mf_field_id mff_ovn_geneve,
                        struct ofpbuf *ofpacts_p,
                        struct ovn_desired_flow_table *flow_table)
 {
@@ -745,16 +786,42 @@  put_local_common_flows(uint32_t dp_key,
 
     ofpbuf_clear(ofpacts_p);
 
-    /* Match MFF_LOG_DATAPATH, MFF_LOG_OUTPORT. */
-    match_outport_dp_and_port_keys(&match, dp_key, port_key);
+    if (!additional_tun) {
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
 
-    put_zones_ofpacts(zone_ids, ofpacts_p);
+        put_zones_ofpacts(zone_ids, ofpacts_p);
+        put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
+        ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
+                        pb->header_.uuid.parts[0], &match, ofpacts_p,
+                        &pb->header_.uuid);
+    } else {
+        /* For packets arriving from tunnels, don't clone again. */
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
+        match_set_reg_masked(&match, MFF_LOG_FLAGS - MFF_REG0,
+                             MLF_LOCAL_ONLY, MLF_LOCAL_ONLY);
 
-    /* Resubmit to table 39. */
-    put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
-    ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
-                    pb->header_.uuid.parts[0], &match, ofpacts_p,
-                    &pb->header_.uuid);
+        put_zones_ofpacts(zone_ids, ofpacts_p);
+        put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
+        ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 110,
+                        pb->header_.uuid.parts[0], &match, ofpacts_p,
+                        &pb->header_.uuid);
+
+        /* For packets originating from this chassis, clone in addition to
+         * handling it locally. */
+        match_outport_dp_and_port_keys(&match, dp_key, port_key);
+
+        ofpbuf_clear(ofpacts_p);
+        put_zones_ofpacts(zone_ids, ofpacts_p);
+        put_resubmit(OFTABLE_CHECK_LOOPBACK, ofpacts_p);
+
+        put_encapsulation(mff_ovn_geneve, additional_tun, pb->datapath,
+                          port_key, false, ofpacts_p);
+        ofpact_put_OUTPUT(ofpacts_p)->port = additional_tun->ofport;
+
+        ofctrl_add_flow(flow_table, OFTABLE_LOCAL_OUTPUT, 100,
+                        pb->header_.uuid.parts[0], &match, ofpacts_p,
+                        &pb->header_.uuid);
+    }
 
     /* Table 39, Priority 100.
      * =======================
@@ -887,6 +954,40 @@  get_binding_peer(struct ovsdb_idl_index *sbrec_port_binding_by_name,
     return peer;
 }
 
+static const struct chassis_tunnel *
+get_additional_tunnel(const struct sbrec_port_binding *binding,
+                      const struct sbrec_chassis *chassis,
+                      const struct hmap *chassis_tunnels)
+{
+    const struct chassis_tunnel *tun = NULL;
+    if (!binding->additional_chassis) {
+        return NULL;
+    }
+    if (binding->additional_chassis == chassis) {
+        tun = get_port_binding_tun(binding->encap, binding->chassis,
+                                   chassis_tunnels);
+        if (!tun) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(
+                &rl, "Failed to locate tunnel to reach main chassis %s "
+                     "for port %s. Cloning packets disabled.",
+                binding->chassis->name, binding->logical_port);
+        }
+    } else {
+        tun = get_port_binding_tun(binding->additional_encap,
+                                   binding->additional_chassis,
+                                   chassis_tunnels);
+        if (!tun) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(
+                &rl, "Failed to locate tunnel to reach additional chassis %s "
+                     "for port %s. Cloning packets disabled.",
+                binding->additional_chassis->name, binding->logical_port);
+        }
+    }
+    return tun;
+}
+
 static void
 consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                       enum mf_field_id mff_ovn_geneve,
@@ -921,6 +1022,7 @@  consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
 
         struct zone_ids binding_zones = get_zone_ids(binding, ct_zones);
         put_local_common_flows(dp_key, binding, NULL, &binding_zones,
+                               NULL, mff_ovn_geneve,
                                ofpacts_p, flow_table);
 
         ofpbuf_clear(ofpacts_p);
@@ -1069,7 +1171,7 @@  consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                                                 binding->logical_port);
         if (ofport && !lport_can_bind_on_this_chassis(chassis, binding)) {
             /* Even though there is an ofport for this port_binding, it is
-             * requested on a different chassis. So ignore this ofport.
+             * requested on different chassis. So ignore this ofport.
              */
             ofport = 0;
         }
@@ -1108,6 +1210,13 @@  consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         }
     }
 
+    /* Clone packets to additional chassis if needed. */
+    const struct chassis_tunnel *additional_tun = NULL;
+    if (!localnet_port) {
+        additional_tun = get_additional_tunnel(binding, chassis,
+                                               chassis_tunnels);
+    }
+
     if (!is_remote) {
         /* Packets that arrive from a vif can belong to a VM or
          * to a container located inside that VM. Packets that
@@ -1118,6 +1227,7 @@  consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         /* Pass the parent port binding if the port is a nested
          * container. */
         put_local_common_flows(dp_key, binding, parent_port, &zone_ids,
+                               additional_tun, mff_ovn_geneve,
                                ofpacts_p, flow_table);
 
         /* Table 0, Priority 150 and 100.
@@ -1346,7 +1456,9 @@  consider_port_binding(struct ovsdb_idl_index *sbrec_port_binding_by_name,
         } else {
             put_remote_port_redirect_overlay(binding, is_ha_remote,
                                              ha_ch_ordered, mff_ovn_geneve,
-                                             tun, port_key, &match, ofpacts_p,
+                                             tun, additional_tun,
+                                             dp_key, port_key,
+                                             &match, ofpacts_p,
                                              chassis_tunnels, flow_table);
         }
     }
@@ -1480,7 +1592,8 @@  consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
             put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32,
                      &remote_ofpacts);
             put_resubmit(OFTABLE_CHECK_LOOPBACK, &remote_ofpacts);
-        } else if (port->chassis == chassis
+        } else if ((port->chassis == chassis
+                    || port->additional_chassis == chassis)
                    && (local_binding_get_primary_pb(local_bindings, lport_name)
                        || !strcmp(port->type, "l3gateway"))) {
             put_load(port->tunnel_key, MFF_LOG_OUTPORT, 0, 32, &ofpacts);
@@ -1503,15 +1616,24 @@  consider_mc_group(struct ovsdb_idl_index *sbrec_port_binding_by_name,
                     put_resubmit(OFTABLE_CHECK_LOOPBACK, &ofpacts);
                 }
             }
-        } else if (port->chassis && !get_localnet_port(
-                local_datapaths, mc->datapath->tunnel_key)) {
+        } else if (!get_localnet_port(local_datapaths,
+                                      mc->datapath->tunnel_key)) {
             /* Add remote chassis only when localnet port not exist,
              * otherwise multicast will reach remote ports through localnet
              * port. */
-            if (chassis_is_vtep(port->chassis)) {
-                sset_add(&vtep_chassis, port->chassis->name);
-            } else {
-                sset_add(&remote_chassis, port->chassis->name);
+            if (port->chassis) {
+                if (chassis_is_vtep(port->chassis)) {
+                    sset_add(&vtep_chassis, port->chassis->name);
+                } else {
+                    sset_add(&remote_chassis, port->chassis->name);
+                }
+            }
+            if (port->additional_chassis) {
+                if (chassis_is_vtep(port->additional_chassis)) {
+                    sset_add(&vtep_chassis, port->additional_chassis->name);
+                } else {
+                    sset_add(&remote_chassis, port->additional_chassis->name);
+                }
             }
         }
     }
diff --git a/tests/ovn.at b/tests/ovn.at
index 3a5973459..2c8c706df 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -13688,6 +13688,341 @@  OVN_CLEANUP([hv1],[hv2])
 AT_CLEANUP
 ])
 
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([basic connectivity with options:requested-additional-chassis])
+ovn_start
+
+net_add n1
+for i in 1 2 3; do
+    sim_add hv$i
+    as hv$i
+    check ovs-vsctl add-br br-phys
+    ovn_attach n1 br-phys 192.168.0.$i
+done
+
+# Disable local ARP responder to pass ARP requests through tunnels
+check ovn-nbctl ls-add ls0 -- add Logical_Switch ls0 other_config vlan-passthru=true
+
+check ovn-nbctl lsp-add ls0 first
+check ovn-nbctl lsp-add ls0 second
+check ovn-nbctl lsp-add ls0 third
+check ovn-nbctl lsp-add ls0 migrator
+check ovn-nbctl lsp-set-addresses first "00:00:00:00:00:01 10.0.0.1"
+check ovn-nbctl lsp-set-addresses second "00:00:00:00:00:02 10.0.0.2"
+check ovn-nbctl lsp-set-addresses third "00:00:00:00:00:03 10.0.0.3"
+check ovn-nbctl lsp-set-addresses migrator "00:00:00:00:00:ff 10.0.0.100"
+
+# The test scenario will migrate Migrator port between hv1 and hv2 and check
+# that connectivity to and from the port is functioning properly for both
+# chassis locations. Connectivity will be checked for resources located at hv1
+# (First) and hv2 (Second) as well as for hv3 (Third) that does not take part
+# in port migration.
+check ovn-nbctl lsp-set-options first requested-chassis=hv1
+check ovn-nbctl lsp-set-options second requested-chassis=hv2
+check ovn-nbctl lsp-set-options third requested-chassis=hv3
+
+as hv1 check ovs-vsctl -- add-port br-int first -- \
+    set Interface first external-ids:iface-id=first \
+    options:tx_pcap=hv1/first-tx.pcap \
+    options:rxq_pcap=hv1/first-rx.pcap
+as hv2 check ovs-vsctl -- add-port br-int second -- \
+    set Interface second external-ids:iface-id=second \
+    options:tx_pcap=hv2/second-tx.pcap \
+    options:rxq_pcap=hv2/second-rx.pcap
+as hv3 check ovs-vsctl -- add-port br-int third -- \
+    set Interface third external-ids:iface-id=third \
+    options:tx_pcap=hv3/third-tx.pcap \
+    options:rxq_pcap=hv3/third-rx.pcap
+
+# Create Migrator interfaces on both hv1 and hv2
+for hv in hv1 hv2; do
+    as $hv check ovs-vsctl -- add-port br-int migrator -- \
+        set Interface migrator external-ids:iface-id=migrator \
+        options:tx_pcap=$hv/migrator-tx.pcap \
+        options:rxq_pcap=$hv/migrator-rx.pcap
+done
+
+send_arp() {
+    local hv=$1 inport=$2 eth_src=$3 eth_dst=$4 spa=$5 tpa=$6
+    local request=${eth_dst}${eth_src}08060001080006040001${eth_src}${spa}${eth_dst}${tpa}
+    as ${hv} ovs-appctl netdev-dummy/receive $inport $request
+    echo "${request}"
+}
+
+reset_pcap_file() {
+    local hv=$1
+    local iface=$2
+    local pcap_file=$3
+    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=dummy-tx.pcap \
+                                                   options:rxq_pcap=dummy-rx.pcap
+    check rm -f ${pcap_file}*.pcap
+    as $hv check ovs-vsctl -- set Interface $iface options:tx_pcap=${pcap_file}-tx.pcap \
+                                                   options:rxq_pcap=${pcap_file}-rx.pcap
+}
+
+reset_env() {
+    reset_pcap_file hv1 first hv1/first
+    reset_pcap_file hv2 second hv2/second
+    reset_pcap_file hv3 third hv3/third
+    reset_pcap_file hv1 migrator hv1/migrator
+    reset_pcap_file hv2 migrator hv2/migrator
+
+    for port in hv1/migrator hv2/migrator hv1/first hv2/second hv3/third; do
+        : > $port.expected
+    done
+}
+
+check_packets() {
+    OVN_CHECK_PACKETS([hv1/migrator-tx.pcap], [hv1/migrator.expected])
+    OVN_CHECK_PACKETS([hv2/migrator-tx.pcap], [hv2/migrator.expected])
+    OVN_CHECK_PACKETS([hv1/first-tx.pcap], [hv1/first.expected])
+    OVN_CHECK_PACKETS([hv2/second-tx.pcap], [hv2/second.expected])
+    OVN_CHECK_PACKETS([hv3/third-tx.pcap], [hv3/third.expected])
+}
+
+migrator_tpa=$(ip_to_hex 10 0 0 100)
+first_spa=$(ip_to_hex 10 0 0 1)
+second_spa=$(ip_to_hex 10 0 0 2)
+third_spa=$(ip_to_hex 10 0 0 3)
+
+for hv in hv1 hv2 hv3; do
+    wait_row_count Chassis 1 name=$hv
+done
+hv1_uuid=$(fetch_column Chassis _uuid name=hv1)
+hv2_uuid=$(fetch_column Chassis _uuid name=hv2)
+
+# Start with Migrator on hv1 but not hv2
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv1
+wait_for_ports_up
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv1_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "" Port_Binding additional_chassis logical_port=migrator
+wait_column "" Port_Binding requested_additional_chassis logical_port=migrator
+wait_for_ports_up
+
+reset_env
+
+OVN_POPULATE_ARP
+
+# check that...
+# unicast from First arrives to hv1:Migrator
+# unicast from First doesn't arrive to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+
+# mcast from First arrives to hv1:Migrator
+# mcast from First doesn't arrive to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# unicast from Second arrives to hv1:Migrator
+# unicast from Second doesn't arrive to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+
+# mcast from Second arrives to hv1:Migrator
+# mcast from Second doesn't arrive to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv3/third.expected
+
+# unicast from Third arrives to hv1:Migrator
+# unicast from Third doesn't arrive to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+
+# mcast from Third arrives to hv1:Migrator
+# mcast from Third doesn't arrive to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+# unicast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# unicast from hv2:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+
+# mcast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# mcast from hv2:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+
+check_packets
+reset_env
+
+# Start port migration hv1 -> hv2: both hypervisors are now bound
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv1 \
+                                         requested-additional-chassis=hv2
+wait_for_ports_up
+wait_column "$hv1_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv1_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding additional_chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding requested_additional_chassis logical_port=migrator
+
+# check that...
+# unicast from First arrives to hv1:Migrator
+# unicast from First arrives to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# mcast from First arrives to hv1:Migrator
+# mcast from First arrives to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+echo $request >> hv3/third.expected
+echo $request >> hv2/second.expected
+
+# unicast from Second arrives to hv1:Migrator
+# unicast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# mcast from Second arrives to hv1:Migrator
+# mcast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+echo $request >> hv3/third.expected
+echo $request >> hv1/first.expected
+
+# unicast from Third arrives to hv1:Migrator binding
+# unicast from Third arrives to hv2:Migrator binding
+request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+
+# mcast from Third arrives to hv1:Migrator
+# mcast from Third arrives to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
+echo $request >> hv1/migrator.expected
+echo $request >> hv2/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+# unicast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# unicast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# mcast from hv1:Migrator arrives to First, Second, and Third
+request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# mcast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+check_packets
+reset_env
+
+# Complete migration: destination is bound
+check ovn-nbctl lsp-set-options migrator requested-chassis=hv2
+wait_for_ports_up
+wait_column "$hv2_uuid" Port_Binding chassis logical_port=migrator
+wait_column "$hv2_uuid" Port_Binding requested_chassis logical_port=migrator
+wait_column "" Port_Binding additional_chassis logical_port=migrator
+wait_column "" Port_Binding requested_additional_chassis logical_port=migrator
+
+# check that...
+# unicast from Third doesn't arrive to hv1:Migrator
+# unicast from Third arrives to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 0000000000ff $third_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+
+# mcast from Third doesn't arrive to hv1:Migrator
+# mcast from Third arrives to hv2:Migrator
+request=$(send_arp hv3 third 000000000003 ffffffffffff $third_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+
+# unicast from First doesn't arrive to hv1:Migrator
+# unicast from First arrives to hv2:Migrator
+request=$(send_arp hv1 first 000000000001 0000000000ff $first_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+
+# mcast from First doesn't arrive to hv1:Migrator
+# mcast from First arrives to hv2:Migrator binding
+request=$(send_arp hv1 first 000000000001 ffffffffffff $first_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+# unicast from Second doesn't arrive to hv1:Migrator
+# unicast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 0000000000ff $second_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+
+# mcast from Second doesn't arrive to hv1:Migrator
+# mcast from Second arrives to hv2:Migrator
+request=$(send_arp hv2 second 000000000002 ffffffffffff $second_spa $migrator_tpa)
+echo $request >> hv2/migrator.expected
+echo $request >> hv1/first.expected
+echo $request >> hv3/third.expected
+
+# unicast from hv1:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv1 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+request=$(send_arp hv1 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+request=$(send_arp hv1 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+
+# unicast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff 000000000001 $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000002 $migrator_tpa $second_spa)
+echo $request >> hv2/second.expected
+request=$(send_arp hv2 migrator 0000000000ff 000000000003 $migrator_tpa $third_spa)
+echo $request >> hv3/third.expected
+
+# mcast from hv1:Migrator doesn't arrive to First, Second, or Third
+request=$(send_arp hv1 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+
+# mcast from hv2:Migrator arrives to First, Second, and Third
+request=$(send_arp hv2 migrator 0000000000ff ffffffffffff $migrator_tpa $first_spa)
+echo $request >> hv1/first.expected
+echo $request >> hv2/second.expected
+echo $request >> hv3/third.expected
+
+check_packets
+
+OVN_CLEANUP([hv1],[hv2],[hv3])
+
+AT_CLEANUP
+])
+
 OVN_FOR_EACH_NORTHD([
 AT_SETUP([options:requested-chassis for logical port])
 ovn_start