
[ovs-dev,1/3] ovn-controller: Add OF rules for port security.

Message ID 20220513004310.3948066-1-numans@ovn.org
State Superseded
Series Adding generic port security flows.

Checks

Context Check Description
ovsrobot/apply-robot success apply and check: success
ovsrobot/github-robot-_Build_and_Test success github build: passed
ovsrobot/github-robot-_ovn-kubernetes success github build: passed

Commit Message

Numan Siddique May 13, 2022, 12:43 a.m. UTC
From: Numan Siddique <numans@ovn.org>

ovn-controller will now generate OF rules for the ingress and egress
port security checks in OF tables 73, 74 and 75.  These flows are
added if a port binding has port security defined in the
Port_Binding.port_security column, which is newly added in this patch.

The idea of this patch is to program these OF rules directly in
ovn-controller instead of having ovn-northd generate logical flows
for them.  This helps reduce the overall number of logical flows in
the Southbound database.

Upcoming patches will add the necessary OVN actions which ovn-northd
can make use of.
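
For illustration, taking the values used in the test case added below:
after

  ovn-sbctl set port-binding sw0p1 port_security='"00:00:00:00:00:03"'

the chassis where sw0p1 (tunnel key 0x1) is bound gets, among others,
these flows in the new ingress port security table:

  table=73, priority=80,reg14=0x1,metadata=0x1
      actions=load:0x1->NXM_NX_REG10[12]
  table=73, priority=90,reg14=0x1,metadata=0x1,dl_src=00:00:00:00:00:03
      actions=resubmit(,74)

i.e. packets from the port set the "port security check failed" flag
(MLF_CHECK_PORT_SEC_BIT, REG10[12]) by default, unless eth.src matches
the configured address, in which case they advance to the ARP/ND check
table 74.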

Reported-at: https://bugzilla.redhat.com/show_bug.cgi?id=2078927
Suggested-by: Dumitru Ceara <dceara@redhat.com>
Signed-off-by: Numan Siddique <numans@ovn.org>
---
 controller/binding.c         |  78 +++-
 controller/binding.h         |  23 +-
 controller/lflow.c           | 792 ++++++++++++++++++++++++++++++++++-
 controller/lflow.h           |   4 +
 controller/ovn-controller.c  |  21 +-
 include/ovn/actions.h        |   4 +
 include/ovn/logical-fields.h |   1 +
 ovn-sb.ovsschema             |   7 +-
 ovn-sb.xml                   |  15 +
 tests/ovn.at                 | 288 +++++++++++++
 10 files changed, 1199 insertions(+), 34 deletions(-)

Patch

diff --git a/controller/binding.c b/controller/binding.c
index e5ba56b25b..1fdde74df9 100644
--- a/controller/binding.c
+++ b/controller/binding.c
@@ -524,24 +524,6 @@  update_active_pb_ras_pd(const struct sbrec_port_binding *pb,
     }
 }
 
-/* This structure represents a logical port (or port binding)
- * which is associated with 'struct local_binding'.
- *
- * An instance of 'struct binding_lport' is created for a logical port
- *  - If the OVS interface's iface-id corresponds to the logical port.
- *  - If it is a container or virtual logical port and its parent
- *    has a 'local binding'.
- *
- */
-struct binding_lport {
-    struct ovs_list list_node; /* Node in local_binding.binding_lports. */
-
-    char *name;
-    const struct sbrec_port_binding *pb;
-    struct local_binding *lbinding;
-    enum en_lport_type type;
-};
-
 static struct local_binding *local_binding_create(
     const char *name, const struct ovsrec_interface *);
 static void local_binding_add(struct shash *local_bindings,
@@ -584,6 +566,11 @@  static const struct sbrec_port_binding *binding_lport_get_parent_pb(
     struct binding_lport *b_lprt);
 static struct binding_lport *binding_lport_check_and_cleanup(
     struct binding_lport *, struct shash *b_lports);
+static bool binding_lport_has_port_sec_changed(
+    struct binding_lport *, const struct sbrec_port_binding *);
+static void binding_lport_clear_port_sec(struct binding_lport *);
+static bool binding_lport_update_port_sec(
+    struct binding_lport *, const struct sbrec_port_binding *);
 
 static char *get_lport_type_str(enum en_lport_type lport_type);
 static bool ovs_iface_matches_lport_iface_id_ver(
@@ -1105,6 +1092,11 @@  consider_vif_lport_(const struct sbrec_port_binding *pb,
                                b_ctx_out->tracked_dp_bindings);
             update_related_lport(pb, b_ctx_out);
             update_local_lports(pb->logical_port, b_ctx_out);
+            if (binding_lport_update_port_sec(b_lport, pb) &&
+                    b_ctx_out->tracked_dp_bindings) {
+                tracked_datapath_lport_add(pb, TRACKED_RESOURCE_UPDATED,
+                                           b_ctx_out->tracked_dp_bindings);
+            }
             if (b_lport->lbinding->iface && qos_map && b_ctx_in->ovs_idl_txn) {
                 get_qos_params(pb, qos_map);
             }
@@ -2727,6 +2719,7 @@  binding_lport_destroy(struct binding_lport *b_lport)
         ovs_list_remove(&b_lport->list_node);
     }
 
+    binding_lport_clear_port_sec(b_lport);
     free(b_lport->name);
     free(b_lport);
 }
@@ -2853,6 +2846,55 @@  cleanup:
 }
 
 
+static bool
+binding_lport_has_port_sec_changed(struct binding_lport *b_lport,
+                                   const struct sbrec_port_binding *pb)
+{
+    if (b_lport->n_port_security != pb->n_port_security) {
+        return true;
+    }
+
+    for (size_t i = 0; i < b_lport->n_port_security; i++) {
+        if (strcmp(b_lport->port_security[i], pb->port_security[i])) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static void
+binding_lport_clear_port_sec(struct binding_lport *b_lport)
+{
+    for (size_t i = 0; i < b_lport->n_port_security; i++) {
+        free(b_lport->port_security[i]);
+    }
+    free(b_lport->port_security);
+    b_lport->n_port_security = 0;
+}
+
+static bool
+binding_lport_update_port_sec(struct binding_lport *b_lport,
+                              const struct sbrec_port_binding *pb)
+{
+    if (binding_lport_has_port_sec_changed(b_lport, pb)) {
+        binding_lport_clear_port_sec(b_lport);
+        b_lport->port_security =
+            pb->n_port_security ?
+            xmalloc(pb->n_port_security * sizeof *b_lport->port_security) :
+            NULL;
+
+        b_lport->n_port_security = pb->n_port_security;
+        for (size_t i = 0; i < pb->n_port_security; i++) {
+            b_lport->port_security[i] = xstrdup(pb->port_security[i]);
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
 static bool
 ovs_iface_matches_lport_iface_id_ver(const struct ovsrec_interface *iface,
                                      const struct sbrec_port_binding *pb)
diff --git a/controller/binding.h b/controller/binding.h
index 430a8d9b17..5c29e2c930 100644
--- a/controller/binding.h
+++ b/controller/binding.h
@@ -134,7 +134,6 @@  struct local_binding {
     struct ovs_list binding_lports;
 };
 
-
 struct local_binding_data {
     struct shash bindings;
     struct shash lports;
@@ -192,4 +191,26 @@  enum en_lport_type {
 
 enum en_lport_type get_lport_type(const struct sbrec_port_binding *);
 
+/* This structure represents a logical port (or port binding)
+ * which is associated with 'struct local_binding'.
+ *
+ * An instance of 'struct binding_lport' is created for a logical port
+ *  - If the OVS interface's iface-id corresponds to the logical port.
+ *  - If it is a container or virtual logical port and its parent
+ *    has a 'local binding'.
+ *
+ */
+struct binding_lport {
+    struct ovs_list list_node; /* Node in local_binding.binding_lports. */
+
+    char *name;
+    const struct sbrec_port_binding *pb;
+    struct local_binding *lbinding;
+    enum en_lport_type type;
+
+    /* Cached port security. */
+    char **port_security;
+    size_t n_port_security;
+};
+
 #endif /* controller/binding.h */
diff --git a/controller/lflow.c b/controller/lflow.c
index 66376ad8c4..580146c8ec 100644
--- a/controller/lflow.c
+++ b/controller/lflow.c
@@ -14,6 +14,7 @@ 
  */
 
 #include <config.h>
+#include "binding.h"
 #include "lflow.h"
 #include "coverage.h"
 #include "ha-chassis.h"
@@ -114,6 +115,11 @@  static void ref_lflow_node_destroy(struct ref_lflow_node *);
 static void lflow_resource_destroy_lflow(struct lflow_resource_ref *,
                                          const struct uuid *lflow_uuid);
 
+static void add_port_sec_flows(const struct shash *binding_lports,
+                               const struct sbrec_chassis *,
+                               struct ovn_desired_flow_table *);
+static void consider_port_sec_flows(const struct sbrec_port_binding *pb,
+                                    struct ovn_desired_flow_table *);
 
 static bool
 lookup_port_cb(const void *aux_, const char *port_name, unsigned int *portp)
@@ -1152,6 +1158,8 @@  add_matches_to_flow_table(const struct sbrec_logical_flow *lflow,
         .ct_snat_vip_ptable = OFTABLE_CT_SNAT_HAIRPIN,
         .fdb_ptable = OFTABLE_GET_FDB,
         .fdb_lookup_ptable = OFTABLE_LOOKUP_FDB,
+        .in_port_sec_ptable = OFTABLE_CHK_IN_PORT_SEC,
+        .out_port_sec_ptable = OFTABLE_CHK_OUT_PORT_SEC,
         .ctrl_meter_id = ctrl_meter_id,
     };
     ovnacts_encode(ovnacts->data, ovnacts->size, &ep, &ofpacts);
@@ -2525,6 +2533,8 @@  lflow_run(struct lflow_ctx_in *l_ctx_in, struct lflow_ctx_out *l_ctx_out)
                          l_ctx_out->hairpin_id_pool);
     add_fdb_flows(l_ctx_in->fdb_table, l_ctx_in->local_datapaths,
                   l_ctx_out->flow_table);
+    add_port_sec_flows(l_ctx_in->binding_lports, l_ctx_in->chassis,
+                       l_ctx_out->flow_table);
 }
 
 /* Should be called at every ovn-controller iteration before IDL tracked
@@ -2698,8 +2708,19 @@  lflow_handle_flows_for_lport(const struct sbrec_port_binding *pb,
 {
     bool changed;
 
-    return lflow_handle_changed_ref(REF_TYPE_PORTBINDING, pb->logical_port,
-                                    l_ctx_in, l_ctx_out, &changed);
+    if (!lflow_handle_changed_ref(REF_TYPE_PORTBINDING, pb->logical_port,
+                                  l_ctx_in, l_ctx_out, &changed)) {
+        return false;
+    }
+
+    /* Program the port security flows. */
+    ofctrl_remove_flows(l_ctx_out->flow_table, &pb->header_.uuid);
+
+    if (pb->n_port_security && shash_find(l_ctx_in->binding_lports,
+                                          pb->logical_port)) {
+        consider_port_sec_flows(pb, l_ctx_out->flow_table);
+    }
+    return true;
 }
 
 /* Handles port-binding add/deletions. */
@@ -2836,3 +2857,770 @@  lflow_handle_changed_fdbs(struct lflow_ctx_in *l_ctx_in,
 
     return true;
 }
+
+static void
+add_port_sec_flows(const struct shash *binding_lports,
+                   const struct sbrec_chassis *chassis,
+                   struct ovn_desired_flow_table *flow_table)
+{
+    const struct shash_node *node;
+    SHASH_FOR_EACH (node, binding_lports) {
+        const struct binding_lport *b_lport = node->data;
+        if (!b_lport->pb || b_lport->pb->chassis != chassis) {
+            continue;
+        }
+
+        consider_port_sec_flows(b_lport->pb, flow_table);
+    }
+}
+
+static void
+reset_match_for_port_sec_flows(const struct sbrec_port_binding *pb,
+                               enum mf_field_id reg_id, struct match *match)
+{
+    match_init_catchall(match);
+    match_set_metadata(match, htonll(pb->datapath->tunnel_key));
+    match_set_reg(match, reg_id - MFF_REG0, pb->tunnel_key);
+}
+
+static void build_port_sec_deny_action(struct ofpbuf *ofpacts)
+{
+    ofpbuf_clear(ofpacts);
+    uint8_t value = 1;
+    put_load(&value, sizeof value, MFF_LOG_FLAGS,
+             MLF_CHECK_PORT_SEC_BIT, 1, ofpacts);
+}
+
+static void build_port_sec_allow_action(struct ofpbuf *ofpacts)
+{
+    ofpbuf_clear(ofpacts);
+    uint8_t value = 0;
+    put_load(&value, sizeof value, MFF_LOG_FLAGS,
+             MLF_CHECK_PORT_SEC_BIT, 1, ofpacts);
+}
+
+static void build_port_sec_adv_nd_check(struct ofpbuf *ofpacts)
+{
+    ofpbuf_clear(ofpacts);
+    struct ofpact_resubmit *resubmit = ofpact_put_RESUBMIT(ofpacts);
+    resubmit->in_port = OFPP_IN_PORT;
+    resubmit->table_id = OFTABLE_CHK_IN_PORT_SEC_ND;
+}
+
+static void
+build_in_port_sec_default_flows(const struct sbrec_port_binding *pb,
+                                struct match *m, struct ofpbuf *ofpacts,
+                                struct ovn_desired_flow_table *flow_table)
+{
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    build_port_sec_deny_action(ofpacts);
+
+    /* Add the below logical flow equivalent OF rule in 'in_port_sec' table.
+     * priority: 80
+     * match - "inport == pb->logical_port"
+     * action - "port_sec_failed = 1;"
+     * description: "Default drop all traffic from""
+     */
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 80,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* ARP checking is done in the next table. So just advance
+     * the arp packets to the next table.
+     *
+     * Add the below logical flow equivalent OF rules in 'in_port_sec' table.
+     * priority: 95
+     * match - "inport == pb->logical_port && arp"
+     * action - "resubmit(,PORT_SEC_ND_TABLE);"
+     */
+    match_set_dl_type(m, htons(ETH_TYPE_ARP));
+    build_port_sec_adv_nd_check(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd' table
+     * priority: 80
+     * match - "inport == pb->logical_port && arp"
+     * action - "port_sec_failed = 1;"
+     * description: "Default drop all arp packets"
+     * note: "Higher priority flows are added to allow the legit ARP packets."
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    build_port_sec_deny_action(ofpacts);
+    match_set_dl_type(m, htons(ETH_TYPE_ARP));
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 80,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd' table
+     * priority: 80
+     * match - "inport == pb->logical_port && icmp6 && icmp6.code == 136"
+     * action - "port_sec_failed = 1;"
+     * description: "Default drop all IPv6 NA packets"
+     * note: "Higher priority flows are added to allow the legit NA packets."
+     */
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    match_set_nw_proto(m, IPPROTO_ICMPV6);
+    match_set_nw_ttl(m, 255);
+    match_set_icmp_type(m, 136);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 80,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd' table
+     * priority: 80
+     * match - "inport == pb->logical_port && icmp6 && icmp6.code == 135"
+     * action - "port_sec_failed = 0;"
+     * description: "Default allow all IPv6 NS packets"
+     * note: This is a hack for now.  Ideally we should do default drop.
+     *       There seems to be a bug in ovs-vswitchd which needs further
+     *       investigation.
+     *
+     * Eg.  If there are below OF rules in the same table
+     * (1) priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=135,
+     *     icmp_code=0,nd_sll=fa:16:3e:94:05:98
+     *     actions=load:0->NXM_NX_REG10[12]
+     * (2) priority=80,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=135,
+     *     icmp_code=0 actions=load:1->NXM_NX_REG10[12]
+     *
+     * An IPv6 NS packet with nd_sll = fa:16:3e:94:05:98 is matching on the
+     * second prio-80 flow instead of the first one.
+     */
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    match_set_nw_proto(m, IPPROTO_ICMPV6);
+    match_set_nw_ttl(m, 255);
+    match_set_icmp_type(m, 135);
+    build_port_sec_allow_action(ofpacts); /* TODO: Change this to
+                                           * build_port_sec_deny_action(). */
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 80,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+build_in_port_sec_no_ip_flows(const struct sbrec_port_binding *pb,
+                              struct lport_addresses *ps_addr,
+                              struct match *m, struct ofpbuf *ofpacts,
+                              struct ovn_desired_flow_table *flow_table)
+{
+    if (ps_addr->n_ipv4_addrs || ps_addr->n_ipv6_addrs) {
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec' table.
+     * priority: 90
+     * match - "inport == pb->logical_port && eth.src == ps_addr.ea"
+     * action - "next;"
+     * description: "Advance the packet for ARP/ND check"
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    match_set_dl_src(m, ps_addr->ea);
+    build_port_sec_adv_nd_check(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+build_in_port_sec_ip4_flows(const struct sbrec_port_binding *pb,
+                           struct lport_addresses *ps_addr,
+                           struct match *m, struct ofpbuf *ofpacts,
+                           struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv4_addrs) {
+        /* If no IPv4 addresses, then 'pb' is not allowed to send IPv4 traffic.
+         * build_in_port_sec_default_flows() takes care of this scenario. */
+        return;
+    }
+
+    /* Allow IPv4 traffic from the port security addresses of 'pb'. */
+    build_port_sec_allow_action(ofpacts);
+
+    /* Add the below logical flow equivalent OF rules in in_port_sec.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *         ip4.src == {ps_addr.ipv4_addrs}"
+     * action - "port_sec_failed = 0;"
+     */
+    for (size_t j = 0; j < ps_addr->n_ipv4_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+        match_set_dl_src(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_IP));
+
+        ovs_be32 mask = ps_addr->ipv4_addrs[j].mask;
+        /* When the netmask is applied, if the host portion is
+         * non-zero, the host can only use the specified
+         * address.  If zero, the host is allowed to use any
+         * address in the subnet.
+         */
+        if (ps_addr->ipv4_addrs[j].plen == 32 ||
+                ps_addr->ipv4_addrs[j].addr & ~mask) {
+            match_set_nw_src(m, ps_addr->ipv4_addrs[j].addr);
+        } else {
+            match_set_nw_src_masked(m, ps_addr->ipv4_addrs[j].addr, mask);
+        }
+
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    /* Add the below logical flow equivalent OF rules in in_port_sec.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *          ip4.src == 0.0.0.0 && ip4.dst == 255.255.255.255 &&
+     *          udp.src == 68 && udp.dst == 67"
+     * action - "port_sec_failed = 0;"
+     * description: "Allow the DHCP requests."
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    match_set_dl_src(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IP));
+
+    ovs_be32 ip4 = htonl(0);
+    match_set_nw_src(m, ip4);
+    ip4 = htonl(0xffffffff);
+    match_set_nw_dst(m, ip4);
+    match_set_nw_proto(m, IPPROTO_UDP);
+    match_set_tp_src(m, htons(68));
+    match_set_tp_dst(m, htons(67));
+
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+/* Adds the OF rules to allow ARP packets in 'in_port_sec_nd' table. */
+static void
+build_in_port_sec_arp_flows(const struct sbrec_port_binding *pb,
+                           struct lport_addresses *ps_addr,
+                           struct match *m, struct ofpbuf *ofpacts,
+                           struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv4_addrs && ps_addr->n_ipv6_addrs) {
+        /* No ARP is allowed as no IPv4 addresses are configured. */
+        return;
+    }
+
+    build_port_sec_allow_action(ofpacts);
+
+    if (!ps_addr->n_ipv4_addrs) {
+        /* No IPv4 addresses.
+         * Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+         * table.
+         * priority: 90
+         * match - "inport == pb->port && eth.src == ps_addr.ea &&
+         *          arp && arp.sha == ps_addr.ea"
+         * action - "port_sec_failed = 0;"
+         */
+        reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+        match_set_dl_src(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_ARP));
+        match_set_arp_sha(m, ps_addr->ea);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *         arp && arp.sha == ps_addr.ea && arp.spa == {ps_addr.ipv4_addrs}"
+     * action - "port_sec_failed = 0;"
+     */
+    for (size_t j = 0; j < ps_addr->n_ipv4_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+        match_set_dl_src(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_ARP));
+        match_set_arp_sha(m, ps_addr->ea);
+
+        ovs_be32 mask = ps_addr->ipv4_addrs[j].mask;
+        if (ps_addr->ipv4_addrs[j].plen == 32 ||
+                ps_addr->ipv4_addrs[j].addr & ~mask) {
+            match_set_nw_src(m, ps_addr->ipv4_addrs[j].addr);
+        } else {
+            match_set_nw_src_masked(m, ps_addr->ipv4_addrs[j].addr, mask);
+        }
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+}
+
+static void
+build_in_port_sec_ip6_flows(const struct sbrec_port_binding *pb,
+                           struct lport_addresses *ps_addr,
+                           struct match *m, struct ofpbuf *ofpacts,
+                           struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv6_addrs) {
+        /* If no IPv6 addresses, then 'pb' is not allowed to send IPv6 traffic.
+         * build_in_port_sec_default_flows() takes care of this scenario. */
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *         ip6.src == {ps_addr.ipv6_addrs, lla}"
+     * action - "next;"
+     * description - Advance the packet for Neighbor Solicit/Adv check.
+     */
+    build_port_sec_adv_nd_check(ofpacts);
+
+    for (size_t j = 0; j < ps_addr->n_ipv6_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+        match_set_dl_src(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+
+        if (ps_addr->ipv6_addrs[j].plen == 128
+            || !ipv6_addr_is_host_zero(&ps_addr->ipv6_addrs[j].addr,
+                                        &ps_addr->ipv6_addrs[j].mask)) {
+            match_set_ipv6_src(m, &ps_addr->ipv6_addrs[j].addr);
+        } else {
+            match_set_ipv6_src_masked(m, &ps_addr->ipv6_addrs[j].network,
+                                        &ps_addr->ipv6_addrs[j].mask);
+        }
+
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    match_set_dl_src(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+
+    struct in6_addr lla;
+    in6_generate_lla(ps_addr->ea, &lla);
+    match_set_ipv6_src(m, &lla);
+
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *          ip6.src == :: && ip6.dst == ff02::/16 && icmp6 &&
+     *          icmp6.code == 0 && icmp6.type == {131, 143}"
+     * action - "port_sec_failed = 0;"
+     */
+    build_port_sec_allow_action(ofpacts);
+    match_set_ipv6_src(m, &in6addr_any);
+    struct in6_addr ip6, mask;
+    char *err = ipv6_parse_masked("ff02::/16", &ip6, &mask);
+    ovs_assert(!err);
+
+    match_set_ipv6_dst_masked(m, &ip6, &mask);
+    match_set_nw_proto(m, IPPROTO_ICMPV6);
+    match_set_icmp_type(m, 131);
+    match_set_icmp_code(m, 0);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    match_set_icmp_type(m, 143);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *          ip6.src == :: && ip6.dst == ff02::/16 && icmp6 &&
+     *          icmp6.code == 0 && icmp6.type == 135"
+     * action - "next;"
+     * description: "Advance the packet for Neighbor solicit check"
+     */
+    build_port_sec_adv_nd_check(ofpacts);
+    match_set_icmp_type(m, 135);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+/* Adds the OF rules to allow IPv6 Neighbor Discovery packets in the
+ * 'in_port_sec_nd' table. */
+static void
+build_in_port_sec_nd_flows(const struct sbrec_port_binding *pb,
+                           struct lport_addresses *ps_addr,
+                           struct match *m, struct ofpbuf *ofpacts,
+                           struct ovn_desired_flow_table *flow_table)
+{
+    build_port_sec_allow_action(ofpacts);
+
+    /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+     * table.
+     * priority: 90
+     * match - "inport == pb->port && eth.src == ps_addr.ea &&
+     *          icmp6 && icmp6.code == 135 && icmp6.type == 0 &&
+     *          ip6.tll == 255 && nd.sll == {00:00:00:00:00:00, ps_addr.ea}"
+     * action - "port_sec_failed = 0;"
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    match_set_nw_proto(m, IPPROTO_ICMPV6);
+    match_set_nw_ttl(m, 225);
+    match_set_icmp_type(m, 135);
+    match_set_icmp_code(m, 0);
+
+    match_set_arp_sha(m, eth_addr_zero);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    match_set_arp_sha(m, ps_addr->ea);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    match_set_icmp_type(m, 136);
+    match_set_icmp_code(m, 0);
+    if (ps_addr->n_ipv6_addrs) {
+        /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+         * table if IPv6 addresses are configured.
+         * priority: 90
+         * match - "inport == pb->port && eth.src == ps_addr.ea && icmp6 &&
+         *          icmp6.type == 136 && icmp6.code == 0 && ip.ttl == 255 &&
+         *          nd.tll == {00:00:00:00:00:00, ps_addr.ea} &&
+         *          nd.target == {ps_addr.ipv6_addrs, lla}"
+         * action - "port_sec_failed = 0;"
+         */
+        struct in6_addr lla;
+        in6_generate_lla(ps_addr->ea, &lla);
+        match_set_arp_tha(m, eth_addr_zero);
+
+        match_set_nd_target(m, &lla);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+        match_set_arp_tha(m, ps_addr->ea);
+        match_set_nd_target(m, &lla);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+
+        for (size_t j = 0; j < ps_addr->n_ipv6_addrs; j++) {
+            reset_match_for_port_sec_flows(pb, MFF_LOG_INPORT, m);
+            match_set_dl_src(m, ps_addr->ea);
+            match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+            match_set_nw_proto(m, IPPROTO_ICMPV6);
+            match_set_icmp_type(m, 136);
+            match_set_icmp_code(m, 0);
+            match_set_arp_tha(m, eth_addr_zero);
+
+            if (ps_addr->ipv6_addrs[j].plen == 128
+                || !ipv6_addr_is_host_zero(&ps_addr->ipv6_addrs[j].addr,
+                                            &ps_addr->ipv6_addrs[j].mask)) {
+                match_set_nd_target(m, &ps_addr->ipv6_addrs[j].addr);
+            } else {
+                match_set_nd_target_masked(m, &ps_addr->ipv6_addrs[j].network,
+                                           &ps_addr->ipv6_addrs[j].mask);
+            }
+
+            ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                            pb->header_.uuid.parts[0], m, ofpacts,
+                            &pb->header_.uuid);
+
+            match_set_arp_tha(m, ps_addr->ea);
+            ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                            pb->header_.uuid.parts[0], m, ofpacts,
+                            &pb->header_.uuid);
+        }
+    } else {
+        /* Add the below logical flow equivalent OF rules in 'in_port_sec_nd'
+         * table if no IPv6 addresses are configured.
+         * priority: 90
+         * match - "inport == pb->port && eth.src == ps_addr.ea && icmp6 &&
+         *          icmp6.type == 136 && icmp6.code == 0 && ip.ttl == 255 &&
+         *          nd.tll == {00:00:00:00:00:00, ps_addr.ea}"
+         * action - "port_sec_failed = 0;"
+         */
+        match_set_arp_tha(m, eth_addr_zero);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+
+        match_set_arp_tha(m, ps_addr->ea);
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_IN_PORT_SEC_ND, 90,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+}
+
+static void
+build_out_port_sec_no_ip_flows(const struct sbrec_port_binding *pb,
+                               struct lport_addresses *ps_addr,
+                               struct match *m, struct ofpbuf *ofpacts,
+                               struct ovn_desired_flow_table *flow_table)
+{
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 85
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea"
+     * action - "port_sec_failed = 0;"
+     * description: "Allow the packet if eth.dst matches."
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    build_port_sec_allow_action(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 85,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+build_out_port_sec_ip4_flows(const struct sbrec_port_binding *pb,
+                            struct lport_addresses *ps_addr,
+                            struct match *m, struct ofpbuf *ofpacts,
+                            struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv4_addrs && !ps_addr->n_ipv6_addrs) {
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 90
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea && ip4"
+     * action - "port_sec_failed = 1;"
+     * description: Default drop IPv4 packets.  If IPv4 addresses are
+     *              configured, then higher priority flows are added
+     *              to allow specific IPv4 packets.
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IP));
+    build_port_sec_deny_action(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    if (!ps_addr->n_ipv4_addrs) {
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 95
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea &&
+     *          ip4.dst == {ps_addr.ipv4_addrs, 255.255.255.255, 224.0.0.0/4},"
+     * action - "port_sec_failed = 0;"
+     */
+    build_port_sec_allow_action(ofpacts);
+    for (size_t j = 0; j < ps_addr->n_ipv4_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+        match_set_dl_dst(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_IP));
+        ovs_be32 mask = ps_addr->ipv4_addrs[j].mask;
+        if (ps_addr->ipv4_addrs[j].plen == 32
+                || ps_addr->ipv4_addrs[j].addr & ~mask) {
+
+            if (ps_addr->ipv4_addrs[j].plen != 32) {
+                /* Special case to allow bcast traffic.
+                 * Eg. If ps_addr is 10.0.0.4/24, then add the below flow
+                 * priority: 95
+                 * match - "outport == pb->logical_port &&
+                 *          eth.dst == ps_addr.ea &&
+                 *          ip4.dst == 10.0.0.255"
+                 * action - "port_sec_failed = 0;"
+                 */
+                ovs_be32 bcast_addr;
+                ovs_assert(ip_parse(ps_addr->ipv4_addrs[j].bcast_s,
+                                    &bcast_addr));
+                match_set_nw_dst(m, bcast_addr);
+                ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                                pb->header_.uuid.parts[0], m, ofpacts,
+                                &pb->header_.uuid);
+            }
+
+            match_set_nw_dst(m, ps_addr->ipv4_addrs[j].addr);
+        } else {
+            /* host portion is zero */
+            match_set_nw_dst_masked(m, ps_addr->ipv4_addrs[j].addr,
+                                    mask);
+        }
+
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IP));
+
+    ovs_be32 ip4 = htonl(0xffffffff);
+    match_set_nw_dst(m, ip4);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    /* Allow 224.0.0.0/4 traffic. */
+    ip4 = htonl(0xe0000000);
+    ovs_be32 mask = htonl(0xf0000000);
+    match_set_nw_dst_masked(m, ip4, mask);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+build_out_port_sec_ip6_flows(const struct sbrec_port_binding *pb,
+                            struct lport_addresses *ps_addr,
+                            struct match *m, struct ofpbuf *ofpacts,
+                            struct ovn_desired_flow_table *flow_table)
+{
+    if (!ps_addr->n_ipv4_addrs && !ps_addr->n_ipv6_addrs) {
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 90
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea && ip6"
+     * action - "port_sec_failed = 1;"
+     * description: Default drop IPv6 packets.  If IPv6 addresses are
+     *              configured, then higher priority flows are added
+     *              to allow specific IPv6 packets.
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    build_port_sec_deny_action(ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 90,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    if (!ps_addr->n_ipv6_addrs) {
+        return;
+    }
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec' table.
+     * priority: 95
+     * match - "outport == pb->logical_port && eth.dst == ps_addr.ea &&
+     *          ip6.dst == {ps_addr.ipv6_addrs, lla, ff00::/8},"
+     * action - "port_sec_failed = 0;"
+     */
+    build_port_sec_allow_action(ofpacts);
+    for (size_t j = 0; j < ps_addr->n_ipv6_addrs; j++) {
+        reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+        match_set_dl_dst(m, ps_addr->ea);
+        match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+
+        if (ps_addr->ipv6_addrs[j].plen == 128
+            || !ipv6_addr_is_host_zero(&ps_addr->ipv6_addrs[j].addr,
+                                        &ps_addr->ipv6_addrs[j].mask)) {
+            match_set_ipv6_dst(m, &ps_addr->ipv6_addrs[j].addr);
+        } else {
+            match_set_ipv6_dst_masked(m, &ps_addr->ipv6_addrs[j].network,
+                                      &ps_addr->ipv6_addrs[j].mask);
+        }
+
+        ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                        pb->header_.uuid.parts[0], m, ofpacts,
+                        &pb->header_.uuid);
+    }
+
+    struct in6_addr lla;
+    in6_generate_lla(ps_addr->ea, &lla);
+
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, m);
+    match_set_dl_dst(m, ps_addr->ea);
+    match_set_dl_type(m, htons(ETH_TYPE_IPV6));
+    match_set_ipv6_dst(m, &lla);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+
+    struct in6_addr ip6, mask;
+    char *err = ipv6_parse_masked("ff00::/8", &ip6, &mask);
+    ovs_assert(!err);
+
+    match_set_ipv6_dst_masked(m, &ip6, &mask);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 95,
+                    pb->header_.uuid.parts[0], m, ofpacts,
+                    &pb->header_.uuid);
+}
+
+static void
+consider_port_sec_flows(const struct sbrec_port_binding *pb,
+                        struct ovn_desired_flow_table *flow_table)
+{
+    if (!pb->n_port_security) {
+        return;
+    }
+
+    struct lport_addresses *ps_addrs;   /* Port security addresses. */
+    size_t n_ps_addrs = 0;
+
+    ps_addrs = xmalloc(sizeof *ps_addrs * pb->n_port_security);
+    for (size_t i = 0; i < pb->n_port_security; i++) {
+        if (!extract_lsp_addresses(pb->port_security[i],
+                                    &ps_addrs[n_ps_addrs])) {
+            static struct vlog_rate_limit rl
+                = VLOG_RATE_LIMIT_INIT(1, 1);
+            VLOG_INFO_RL(&rl, "invalid syntax '%s' in port "
+                         "security. No MAC address found",
+                         pb->port_security[i]);
+            continue;
+        }
+        n_ps_addrs++;
+    }
+
+    if (!n_ps_addrs) {
+        free(ps_addrs);
+        return;
+    }
+
+    struct match match = MATCH_CATCHALL_INITIALIZER;
+    uint64_t stub[1024 / 8];
+    struct ofpbuf ofpacts = OFPBUF_STUB_INITIALIZER(stub);
+
+    build_in_port_sec_default_flows(pb, &match, &ofpacts, flow_table);
+
+    for (size_t i = 0; i < n_ps_addrs; i++) {
+        build_in_port_sec_no_ip_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                      flow_table);
+        build_in_port_sec_ip4_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                    flow_table);
+        build_in_port_sec_arp_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                    flow_table);
+        build_in_port_sec_ip6_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                    flow_table);
+        build_in_port_sec_nd_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                   flow_table);
+    }
+
+    /* Out port security. */
+
+    /* Add the below logical flow equivalent OF rules in 'out_port_sec'
+     * table.
+     * priority: 80
+     * match - "outport == pb->logical_port"
+     * action - "port_sec_failed = 1;"
+     * description: "Drop all traffic"
+     */
+    reset_match_for_port_sec_flows(pb, MFF_LOG_OUTPORT, &match);
+    build_port_sec_deny_action(&ofpacts);
+    ofctrl_add_flow(flow_table, OFTABLE_CHK_OUT_PORT_SEC, 80,
+                    pb->header_.uuid.parts[0], &match, &ofpacts,
+                    &pb->header_.uuid);
+
+    for (size_t i = 0; i < n_ps_addrs; i++) {
+        build_out_port_sec_no_ip_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                       flow_table);
+        build_out_port_sec_ip4_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                       flow_table);
+        build_out_port_sec_ip6_flows(pb, &ps_addrs[i], &match, &ofpacts,
+                                       flow_table);
+    }
+
+    ofpbuf_uninit(&ofpacts);
+    for (size_t i = 0; i < n_ps_addrs; i++) {
+        destroy_lport_addresses(&ps_addrs[i]);
+    }
+    free(ps_addrs);
+}
diff --git a/controller/lflow.h b/controller/lflow.h
index ba2efcebda..ad9449d3ac 100644
--- a/controller/lflow.h
+++ b/controller/lflow.h
@@ -76,6 +76,9 @@  struct uuid;
 #define OFTABLE_CT_SNAT_HAIRPIN      70
 #define OFTABLE_GET_FDB              71
 #define OFTABLE_LOOKUP_FDB           72
+#define OFTABLE_CHK_IN_PORT_SEC      73
+#define OFTABLE_CHK_IN_PORT_SEC_ND   74
+#define OFTABLE_CHK_OUT_PORT_SEC     75
 
 enum ref_type {
     REF_TYPE_ADDRSET,
@@ -155,6 +158,7 @@  struct lflow_ctx_in {
     const struct shash *port_groups;
     const struct sset *active_tunnels;
     const struct sset *related_lport_ids;
+    const struct shash *binding_lports;
     const struct hmap *chassis_tunnels;
     bool check_ct_label_for_lb_hairpin;
 };
diff --git a/controller/ovn-controller.c b/controller/ovn-controller.c
index 5a6274eb23..e662756e10 100644
--- a/controller/ovn-controller.c
+++ b/controller/ovn-controller.c
@@ -1959,7 +1959,8 @@  ct_zones_runtime_data_handler(struct engine_node *node, void *data)
                 continue;
             }
 
-            if (t_lport->tracked_type == TRACKED_RESOURCE_NEW) {
+            if (t_lport->tracked_type == TRACKED_RESOURCE_NEW ||
+                t_lport->tracked_type == TRACKED_RESOURCE_UPDATED) {
                 if (!simap_contains(&ct_zones_data->current,
                                     t_lport->pb->logical_port)) {
                     alloc_id_to_ct_zone(t_lport->pb->logical_port,
@@ -1981,8 +1982,6 @@  ct_zones_runtime_data_handler(struct engine_node *node, void *data)
                     simap_delete(&ct_zones_data->current, ct_zone);
                     updated = true;
                 }
-            } else {
-                OVS_NOT_REACHED();
             }
         }
     }
@@ -2437,6 +2436,7 @@  init_lflow_ctx(struct engine_node *node,
     l_ctx_in->port_groups = port_groups;
     l_ctx_in->active_tunnels = &rt_data->active_tunnels;
     l_ctx_in->related_lport_ids = &rt_data->related_lports.lport_ids;
+    l_ctx_in->binding_lports = &rt_data->lbinding_data.lports;
     l_ctx_in->chassis_tunnels = &non_vif_data->chassis_tunnels;
     l_ctx_in->check_ct_label_for_lb_hairpin =
         get_check_ct_label_for_lb_hairpin(n_ver->ver);
@@ -2791,14 +2791,13 @@  lflow_output_runtime_data_handler(struct engine_node *node,
                     &l_ctx_in, &l_ctx_out)) {
                 return false;
             }
-        } else {
-            struct shash_node *shash_node;
-            SHASH_FOR_EACH (shash_node, &tdp->lports) {
-                struct tracked_lport *lport = shash_node->data;
-                if (!lflow_handle_flows_for_lport(lport->pb, &l_ctx_in,
-                                                  &l_ctx_out)) {
-                    return false;
-                }
+        }
+        struct shash_node *shash_node;
+        SHASH_FOR_EACH (shash_node, &tdp->lports) {
+            struct tracked_lport *lport = shash_node->data;
+            if (!lflow_handle_flows_for_lport(lport->pb, &l_ctx_in,
+                                                &l_ctx_out)) {
+                return false;
             }
         }
     }
diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index f55d77d479..670e887a18 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -806,6 +806,10 @@  struct ovnact_encode_params {
                          * 'get_fdb' to resubmit. */
     uint8_t fdb_lookup_ptable; /* OpenFlow table for
                                 * 'lookup_fdb' to resubmit. */
+    uint8_t in_port_sec_ptable; /* OpenFlow table for
+                                * 'check_in_port_sec' to resubmit. */
+    uint8_t out_port_sec_ptable; /* OpenFlow table for
+                                * 'check_out_port_sec' to resubmit. */
     uint32_t ctrl_meter_id;     /* Meter to be used if the resulting flow
                                    sends packets to controller. */
 };
diff --git a/include/ovn/logical-fields.h b/include/ovn/logical-fields.h
index 25b5a62a3e..35ed771858 100644
--- a/include/ovn/logical-fields.h
+++ b/include/ovn/logical-fields.h
@@ -71,6 +71,7 @@  enum mff_log_flags_bits {
     MLF_SKIP_SNAT_FOR_LB_BIT = 9,
     MLF_LOCALPORT_BIT = 10,
     MLF_USE_SNAT_ZONE = 11,
+    MLF_CHECK_PORT_SEC_BIT = 12,
 };
 
 /* MFF_LOG_FLAGS_REG flag assignments */
diff --git a/ovn-sb.ovsschema b/ovn-sb.ovsschema
index 66664c840f..8609521944 100644
--- a/ovn-sb.ovsschema
+++ b/ovn-sb.ovsschema
@@ -1,7 +1,7 @@ 
 {
     "name": "OVN_Southbound",
-    "version": "20.22.0",
-    "cksum": "1686121686 27471",
+    "version": "20.23.0",
+    "cksum": "2468078434 27629",
     "tables": {
         "SB_Global": {
             "columns": {
@@ -225,6 +225,9 @@ 
                 "mac": {"type": {"key": "string",
                                  "min": 0,
                                  "max": "unlimited"}},
+                "port_security": {"type": {"key": "string",
+                                 "min": 0,
+                                 "max": "unlimited"}},
                 "nat_addresses": {"type": {"key": "string",
                                            "min": 0,
                                            "max": "unlimited"}},
diff --git a/ovn-sb.xml b/ovn-sb.xml
index b0b11b8ee3..086edddef4 100644
--- a/ovn-sb.xml
+++ b/ovn-sb.xml
@@ -2969,6 +2969,21 @@  tcp.flags = RST;
         follows the same format as that column.
       </column>
 
+      <column name="port_security">
+        <p>
+          This column controls the addresses from which the host attached to
+          the logical port (``the host'') is allowed to send packets and to
+          which it is allowed to receive packets.  If this column is empty,
+          all addresses are permitted.
+        </p>
+
+        <p>
+          It is copied from the <code>port_security</code> column in the
+          <code>Logical_Switch_Port</code> table in the Northbound database. It
+          follows the same format as that column.
+        </p>
+      </column>
+
       <column name="type">
         <p>
           A type for this logical port.  Logical ports can be used to model other
diff --git a/tests/ovn.at b/tests/ovn.at
index 6a0a169c1a..c2026a9482 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -30595,3 +30595,291 @@  test_mac_binding_flows hv2 00\:00\:00\:00\:10\:10 1
 OVN_CLEANUP([hv1], [hv2])
 AT_CLEANUP
 ])
+
+OVN_FOR_EACH_NORTHD([
+AT_SETUP([ovn-controller port security OF flows])
+ovn_start
+
+net_add n1
+
+# create two hypervisors, each with one vif port
+sim_add hv1
+as hv1
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.11
+
+sim_add hv2
+as hv2
+ovs-vsctl add-br br-phys
+ovn_attach n1 br-phys 192.168.0.12
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl lsp-add sw0 sw0p1 -- lsp-set-addresses sw0p1 "00:00:00:00:00:03 10.0.0.3"
+check ovn-nbctl lsp-add sw0 sw0p2 -- lsp-set-addresses sw0p2 "00:00:00:00:00:04 10.0.0.4"
+
+as hv1
+ovs-vsctl -- add-port br-int hv1-vif0 -- \
+set Interface hv1-vif0 external-ids:iface-id=sw0p1 ofport-request=1
+
+wait_for_ports_up sw0p1
+
+sw0_dp_key=$(printf "%x" $(fetch_column Datapath_Binding tunnel_key external_ids:name=sw0))
+sw0p1_key=$(printf "%x" $(fetch_column Port_Binding tunnel_key logical_port=sw0p1))
+sw0p2_key=$(printf "%x" $(fetch_column Port_Binding tunnel_key logical_port=sw0p2))
+
+# There should be no flows in tables 73, 74 and 75 in hv1 and hv2.
+> hv1_t73_flows.expected
+> hv1_t74_flows.expected
+> hv1_t75_flows.expected
+
+> hv2_t73_flows.expected
+> hv2_t74_flows.expected
+> hv2_t75_flows.expected
+
+check_port_sec_offlows() {
+    hv=$1
+    t=$2
+
+    as $hv ovs-ofctl dump-flows br-int table=${t} | ofctl_strip_all | sort | grep -v NXST_FLOW > ${hv}_t${t}_flows.actual
+    AT_CHECK([diff -u ${hv}_t${t}_flows.actual ${hv}_t${t}_flows.expected])
+}
+
+check_port_sec_offlows hv1 73
+check_port_sec_offlows hv1 74
+check_port_sec_offlows hv1 75
+
+check_port_sec_offlows hv2 73
+check_port_sec_offlows hv2 74
+check_port_sec_offlows hv2 75
+
+# Set port security for sw0p1
+check ovn-sbctl set port-binding sw0p1 port_security='"00:00:00:00:00:03"'
+check ovn-nbctl --wait=hv sync
+
+echo " table=73, priority=80,reg14=0x$sw0p1_key,metadata=0x$sw0_dp_key actions=load:0x1->NXM_NX_REG10[[12]]
+ table=73, priority=90,reg14=0x$sw0p1_key,metadata=0x$sw0_dp_key,dl_src=00:00:00:00:00:03 actions=resubmit(,74)
+ table=73, priority=95,arp,reg14=0x1,metadata=0x$sw0_dp_key actions=resubmit(,74)" > hv1_t73_flows.expected
+
+check_port_sec_offlows hv1 73
+
+echo " table=74, priority=80,arp,reg14=0x1,metadata=0x1 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=74, priority=80,icmp6,reg14=0x1,metadata=0x1,nw_ttl=255,icmp_type=135 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=80,icmp6,reg14=0x1,metadata=0x1,nw_ttl=255,icmp_type=136 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=74, priority=90,arp,reg14=0x1,metadata=0x1,dl_src=00:00:00:00:00:03,arp_sha=00:00:00:00:00:03 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=135,icmp_code=0,nd_sll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=135,icmp_code=0,nd_sll=00:00:00:00:00:03 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=136,icmp_code=0,nd_tll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=136,icmp_code=0,nd_tll=00:00:00:00:00:03 actions=load:0->NXM_NX_REG10[[12]]" > hv1_t74_flows.expected
+
+check_port_sec_offlows hv1 74
+
+echo " table=75, priority=80,reg15=0x1,metadata=0x1 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=85,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:03 actions=load:0->NXM_NX_REG10[[12]]" > hv1_t75_flows.expected
+
+check_port_sec_offlows hv1 75
+
+> hv2_t73_flows.expected
+> hv2_t74_flows.expected
+> hv2_t75_flows.expected
+
+check_port_sec_offlows hv2 73
+check_port_sec_offlows hv2 74
+check_port_sec_offlows hv2 75
+
+# Add IPv4 addresses to sw0p1
+check ovn-sbctl set port-binding sw0p1 port_security='"00:00:00:00:00:03 10.0.0.3" "00:00:00:00:00:13 10.0.0.13"'
+check ovn-nbctl --wait=hv sync
+
+echo " table=73, priority=80,reg14=0x1,metadata=0x1 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=73, priority=90,ip,reg14=0x1,metadata=0x1,dl_src=00:00:00:00:00:03,nw_src=10.0.0.3 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,ip,reg14=0x1,metadata=0x1,dl_src=00:00:00:00:00:13,nw_src=10.0.0.13 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,udp,reg14=0x1,metadata=0x1,dl_src=00:00:00:00:00:03,nw_src=0.0.0.0,nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,udp,reg14=0x1,metadata=0x1,dl_src=00:00:00:00:00:13,nw_src=0.0.0.0,nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=95,arp,reg14=0x1,metadata=0x1 actions=resubmit(,74)" > hv1_t73_flows.expected
+
+check_port_sec_offlows hv1 73
+
+echo " table=74, priority=80,arp,reg14=0x1,metadata=0x1 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=74, priority=80,icmp6,reg14=0x1,metadata=0x1,nw_ttl=255,icmp_type=135 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=80,icmp6,reg14=0x1,metadata=0x1,nw_ttl=255,icmp_type=136 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=74, priority=90,arp,reg14=0x1,metadata=0x1,dl_src=00:00:00:00:00:03,arp_spa=10.0.0.3,arp_sha=00:00:00:00:00:03 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,arp,reg14=0x1,metadata=0x1,dl_src=00:00:00:00:00:13,arp_spa=10.0.0.13,arp_sha=00:00:00:00:00:13 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=135,icmp_code=0,nd_sll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=135,icmp_code=0,nd_sll=00:00:00:00:00:03 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=135,icmp_code=0,nd_sll=00:00:00:00:00:13 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=136,icmp_code=0,nd_tll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=136,icmp_code=0,nd_tll=00:00:00:00:00:03 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x1,metadata=0x1,nw_ttl=225,icmp_type=136,icmp_code=0,nd_tll=00:00:00:00:00:13 actions=load:0->NXM_NX_REG10[[12]]" > hv1_t74_flows.expected
+
+check_port_sec_offlows hv1 74
+
+echo " table=75, priority=80,reg15=0x1,metadata=0x1 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=85,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:03 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=85,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:13 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=90,ip,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:03 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=90,ip,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:13 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=90,ipv6,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:03 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=90,ipv6,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:13 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:03,nw_dst=10.0.0.3 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:03,nw_dst=224.0.0.0/4 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:03,nw_dst=255.255.255.255 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:13,nw_dst=10.0.0.13 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:13,nw_dst=224.0.0.0/4 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x1,metadata=0x1,dl_dst=00:00:00:00:00:13,nw_dst=255.255.255.255 actions=load:0->NXM_NX_REG10[[12]]" > hv1_t75_flows.expected
+
+check_port_sec_offlows hv1 75
+
+check_port_sec_offlows hv2 73
+check_port_sec_offlows hv2 74
+check_port_sec_offlows hv2 75
+
+# Configure IPv4 and IPv6 addresses in sw0p2
+check ovn-sbctl set port-binding sw0p2 port_security='"00:00:00:00:00:04 10.0.0.4 20.0.0.4/24 30.0.0.0/16 1000::4 2000::/64" "00:00:00:00:00:13 aef0::4"'
+
+# There should be no changes in hv1 and hv2 as sw0p2 is not claimed.
+check_port_sec_offlows hv1 73
+check_port_sec_offlows hv1 74
+check_port_sec_offlows hv1 75
+
+check_port_sec_offlows hv2 73
+check_port_sec_offlows hv2 74
+check_port_sec_offlows hv2 75
+
+as hv2
+ovs-vsctl -- add-port br-int hv2-vif0 -- \
+set Interface hv2-vif0 external-ids:iface-id=sw0p2 ofport-request=1
+
+wait_for_ports_up
+# There should be no changes in hv1
+check_port_sec_offlows hv1 73
+check_port_sec_offlows hv1 74
+check_port_sec_offlows hv1 75
+
+#hv2 ovn-controller should program flows.
+echo " table=73, priority=80,reg14=0x2,metadata=0x1 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=73, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,ipv6_src=::,ipv6_dst=ff02::/16,icmp_type=131,icmp_code=0 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,ipv6_src=::,ipv6_dst=ff02::/16,icmp_type=135,icmp_code=0 actions=resubmit(,74)
+ table=73, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,ipv6_src=::,ipv6_dst=ff02::/16,icmp_type=143,icmp_code=0 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:13,ipv6_src=::,ipv6_dst=ff02::/16,icmp_type=131,icmp_code=0 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:13,ipv6_src=::,ipv6_dst=ff02::/16,icmp_type=135,icmp_code=0 actions=resubmit(,74)
+ table=73, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:13,ipv6_src=::,ipv6_dst=ff02::/16,icmp_type=143,icmp_code=0 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,ip,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,nw_src=10.0.0.4 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,ip,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,nw_src=20.0.0.4 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,ip,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,nw_src=30.0.0.0/16 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=90,ipv6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,ipv6_src=1000::4 actions=resubmit(,74)
+ table=73, priority=90,ipv6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,ipv6_src=2000::/64 actions=resubmit(,74)
+ table=73, priority=90,ipv6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,ipv6_src=fe80::200:ff:fe00:4 actions=resubmit(,74)
+ table=73, priority=90,ipv6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:13,ipv6_src=aef0::4 actions=resubmit(,74)
+ table=73, priority=90,ipv6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:13,ipv6_src=fe80::200:ff:fe00:13 actions=resubmit(,74)
+ table=73, priority=90,udp,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,nw_src=0.0.0.0,nw_dst=255.255.255.255,tp_src=68,tp_dst=67 actions=load:0->NXM_NX_REG10[[12]]
+ table=73, priority=95,arp,reg14=0x2,metadata=0x1 actions=resubmit(,74)" > hv2_t73_flows.expected
+
+check_port_sec_offlows hv2 73
+
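+# Expected table 74 flows for sw0p2: ARP spa/sha checks and IPv6 ND checks
+# (nd_sll for NS, nd_target/nd_tll for NA) against the configured MAC/IP
+# pairs, including the EUI-64 link-local addresses derived from the
+# port's MACs.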
+echo " table=74, priority=80,arp,reg14=0x2,metadata=0x1 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=74, priority=80,icmp6,reg14=0x2,metadata=0x1,nw_ttl=255,icmp_type=135 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=80,icmp6,reg14=0x2,metadata=0x1,nw_ttl=255,icmp_type=136 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=74, priority=90,arp,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,arp_spa=10.0.0.4,arp_sha=00:00:00:00:00:04 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,arp,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,arp_spa=20.0.0.4,arp_sha=00:00:00:00:00:04 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,arp,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,arp_spa=30.0.0.0/16,arp_sha=00:00:00:00:00:04 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,icmp_type=136,icmp_code=0,nd_target=1000::4,nd_tll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,icmp_type=136,icmp_code=0,nd_target=1000::4,nd_tll=00:00:00:00:00:04 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,icmp_type=136,icmp_code=0,nd_target=2000::/64,nd_tll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04,icmp_type=136,icmp_code=0,nd_target=2000::/64,nd_tll=00:00:00:00:00:04 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:13,icmp_type=136,icmp_code=0,nd_target=aef0::4,nd_tll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:13,icmp_type=136,icmp_code=0,nd_target=aef0::4,nd_tll=00:00:00:00:00:13 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,nw_ttl=255,icmp_type=135,icmp_code=0,nd_sll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,nw_ttl=255,icmp_type=135,icmp_code=0,nd_sll=00:00:00:00:00:04 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,nw_ttl=255,icmp_type=135,icmp_code=0,nd_sll=00:00:00:00:00:13 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,nw_ttl=255,icmp_type=136,icmp_code=0,nd_target=fe80::200:ff:fe00:13,nd_tll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,nw_ttl=255,icmp_type=136,icmp_code=0,nd_target=fe80::200:ff:fe00:13,nd_tll=00:00:00:00:00:13 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,nw_ttl=255,icmp_type=136,icmp_code=0,nd_target=fe80::200:ff:fe00:4,nd_tll=00:00:00:00:00:00 actions=load:0->NXM_NX_REG10[[12]]
+ table=74, priority=90,icmp6,reg14=0x2,metadata=0x1,nw_ttl=255,icmp_type=136,icmp_code=0,nd_target=fe80::200:ff:fe00:4,nd_tll=00:00:00:00:00:04 actions=load:0->NXM_NX_REG10[[12]]" > hv2_t74_flows.expected
+
+check_port_sec_offlows hv2 74
+
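+# Expected table 75 (out port security) flows for sw0p2: the allowed
+# destination MACs plus the configured IPv4/IPv6 destination addresses,
+# the 20.0.0.0/24 subnet broadcast, 255.255.255.255 and the multicast
+# ranges (224.0.0.0/4 and ff00::/8).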
+echo " table=75, priority=80,reg15=0x2,metadata=0x1 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=85,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=85,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:13 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=90,ip,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=90,ip,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:13 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=90,ipv6,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=90,ipv6,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:13 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,nw_dst=10.0.0.4 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,nw_dst=20.0.0.255 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,nw_dst=20.0.0.4 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,nw_dst=224.0.0.0/4 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,nw_dst=255.255.255.255 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ip,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,nw_dst=30.0.0.0/16 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ipv6,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,ipv6_dst=1000::4 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ipv6,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,ipv6_dst=2000::/64 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ipv6,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,ipv6_dst=fe80::200:ff:fe00:4 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ipv6,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:04,ipv6_dst=ff00::/8 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ipv6,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:13,ipv6_dst=aef0::4 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ipv6,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:13,ipv6_dst=fe80::200:ff:fe00:13 actions=load:0->NXM_NX_REG10[[12]]
+ table=75, priority=95,ipv6,reg15=0x2,metadata=0x1,dl_dst=00:00:00:00:00:13,ipv6_dst=ff00::/8 actions=load:0->NXM_NX_REG10[[12]]" > hv2_t75_flows.expected
+
+check_port_sec_offlows hv2 75
+
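+# Clear port_security on sw0p2.  The sw0p1 flows on hv1 should be unaffected.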
+check ovn-sbctl clear port-binding sw0p2 port_security
+check ovn-nbctl --wait=hv sync
+
+check_port_sec_offlows hv1 73
+check_port_sec_offlows hv1 74
+check_port_sec_offlows hv1 75
+
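+# hv2 should no longer have any port security flows for sw0p2.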
+> hv2_t73_flows.expected
+> hv2_t74_flows.expected
+> hv2_t75_flows.expected
+
+check_port_sec_offlows hv2 73
+check_port_sec_offlows hv2 74
+check_port_sec_offlows hv2 75
+
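+# Restrict sw0p2 to MAC-only port security.  hv1 should be unaffected and
+# hv2 should reprogram table 73 with only the MAC-based checks.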
+check ovn-sbctl set port-binding sw0p2 port_security='"00:00:00:00:00:04"'
+
+check_port_sec_offlows hv1 73
+check_port_sec_offlows hv1 74
+check_port_sec_offlows hv1 75
+
+echo " table=73, priority=80,reg14=0x2,metadata=0x1 actions=load:0x1->NXM_NX_REG10[[12]]
+ table=73, priority=90,reg14=0x2,metadata=0x1,dl_src=00:00:00:00:00:04 actions=resubmit(,74)
+ table=73, priority=95,arp,reg14=0x2,metadata=0x1 actions=resubmit(,74)" > hv2_t73_flows.expected
+
+check_port_sec_offlows hv2 73
+
+# Delete sw0p2
+check ovn-nbctl --wait=hv lsp-del sw0p2
+
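+# Deleting sw0p2 should remove all of its port security flows from hv2,
+# leaving hv1 unchanged.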
+> hv2_t73_flows.expected
+> hv2_t74_flows.expected
+> hv2_t75_flows.expected
+
+check_port_sec_offlows hv1 73
+check_port_sec_offlows hv1 74
+check_port_sec_offlows hv1 75
+
+check_port_sec_offlows hv2 73
+check_port_sec_offlows hv2 74
+check_port_sec_offlows hv2 75
+
+# Release sw0p1 from hv1
+as hv1 ovs-vsctl del-port hv1-vif0
+
+wait_column '' Port_Binding chassis logical_port=sw0p1
+
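+# Once sw0p1 is released, hv1 should remove its port security flows.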
+> hv1_t73_flows.expected
+> hv1_t74_flows.expected
+> hv1_t75_flows.expected
+
+check_port_sec_offlows hv1 73
+check_port_sec_offlows hv1 74
+check_port_sec_offlows hv1 75
+
+check_port_sec_offlows hv2 73
+check_port_sec_offlows hv2 74
+check_port_sec_offlows hv2 75
+
+OVN_CLEANUP([hv1], [hv2])
+AT_CLEANUP
+])