
[ovs-dev,branch-21.09,2/3] nb: Add support for Load_Balancer_Groups.

Message ID 20211102193042.615.91392.stgit@dceara.remote.csb
State Accepted
Series Improve Load Balancer performance.

Checks

Context Check Description
ovsrobot/apply-robot success apply and check: success
ovsrobot/github-robot-_Build_and_Test success github build: passed
ovsrobot/github-robot-_ovn-kubernetes success github build: passed

Commit Message

Dumitru Ceara Nov. 2, 2021, 7:30 p.m. UTC
For deployments in which a large number of load balancers are
associated with multiple logical switches/routers, introduce syntactic
sugar in the OVN_Northbound database (Load_Balancer_Groups) to simplify
configuration.

Instead of associating N Load_Balancer records with M Logical_Switches
(M x N references in the NB database), we can create a single
Load_Balancer_Group record, associate all N Load_Balancer records with
it, and associate it with all M Logical_Switches (M + N references in
the NB database in total).
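
Concretely, with the sizes used in the benchmark below (M = 120
logical switches, N = 1000 load balancers):

  Plain LBs:  M x N = 120 * 1000 = 120,000 NB references
  LB Groups:  M + N = 120 + 1000 =   1,120 NB references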

This makes it easier for the CMS to configure load balancers (e.g., in
the ovn-kubernetes use case, cluster load balancers are applied to all
node logical switches and node logical gateway routers) and it also
drastically improves performance on the NB ovsdb-server side, simply
because ovsdb-server now has to track M times fewer references.

With a microbenchmark that creates 120 logical switches and associates
1000 load balancers with them (using the ovn-nbctl daemon) we measured:

             CPU Time NB DB          CPU Time ovn-nbctl
  -----------------------------------------------------
  Plain LB:             30s                         35s
  LB Groups:             1s                          2s
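
For reference, a minimal sketch of how a group is created and attached
using the generic ovn-nbctl database commands (mirroring the tests
below; sw0 and the $lb1/$lb2 UUID variables are placeholders):

  lbg=$(ovn-nbctl create load_balancer_group name=lbg -- \
    add load_balancer_group lbg load_balancer $lb1 -- \
    add load_balancer_group lbg load_balancer $lb2)
  ovn-nbctl add logical_switch sw0 load_balancer_group $lbg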

Reported-at: https://bugzilla.redhat.com/2001528
Signed-off-by: Dumitru Ceara <dceara@redhat.com>
(cherry picked from commit f6aba21c9de8952beccf7ee7e98cfa28618f1edf)
---
 NEWS                  |    2 
 northd/northd.c       |  242 +++++++++++++++++++++++++++++++-----------------
 ovn-nb.ovsschema      |   24 ++++-
 ovn-nb.xml            |   37 ++++++-
 tests/ovn-northd.at   |  246 +++++++++++++++++++++++++++++++++++++++++--------
 utilities/ovn-nbctl.c |    3 +
 6 files changed, 424 insertions(+), 130 deletions(-)

Patch

diff --git a/NEWS b/NEWS
index 8fce36482..63d765aa4 100644
--- a/NEWS
+++ b/NEWS
@@ -1,5 +1,7 @@ 
 OVN v21.09.1 - xx xxx xxxx
 --------------------------
+  - Added Load_Balancer_Group support, which simplifies large scale
+    configurations of load balancers.
 
 OVN v21.09.0 - 01 Oct 2021
 --------------------------
diff --git a/northd/northd.c b/northd/northd.c
index 502c263cc..c1d83548e 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -827,17 +827,74 @@  static void destroy_router_enternal_ips(struct ovn_datapath *od)
     sset_destroy(&od->external_ips);
 }
 
+static bool
+lb_has_vip(const struct nbrec_load_balancer *lb)
+{
+    return !smap_is_empty(&lb->vips);
+}
+
+static bool
+lb_group_has_vip(const struct nbrec_load_balancer_group *lb_group)
+{
+    for (size_t i = 0; i < lb_group->n_load_balancer; i++) {
+        if (lb_has_vip(lb_group->load_balancer[i])) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static bool
+ls_has_lb_vip(struct ovn_datapath *od)
+{
+    for (size_t i = 0; i < od->nbs->n_load_balancer; i++) {
+        if (lb_has_vip(od->nbs->load_balancer[i])) {
+            return true;
+        }
+    }
+
+    for (size_t i = 0; i < od->nbs->n_load_balancer_group; i++) {
+        if (lb_group_has_vip(od->nbs->load_balancer_group[i])) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static bool
+lr_has_lb_vip(struct ovn_datapath *od)
+{
+    for (size_t i = 0; i < od->nbr->n_load_balancer; i++) {
+        if (lb_has_vip(od->nbr->load_balancer[i])) {
+            return true;
+        }
+    }
+
+    for (size_t i = 0; i < od->nbr->n_load_balancer_group; i++) {
+        if (lb_group_has_vip(od->nbr->load_balancer_group[i])) {
+            return true;
+        }
+    }
+    return false;
+}
+
 static void
-init_lb_ips(struct ovn_datapath *od)
+init_lb_for_datapath(struct ovn_datapath *od)
 {
     sset_init(&od->lb_ips_v4);
     sset_init(&od->lb_ips_v4_routable);
     sset_init(&od->lb_ips_v6);
     sset_init(&od->lb_ips_v6_routable);
+
+    if (od->nbs) {
+        od->has_lb_vip = ls_has_lb_vip(od);
+    } else {
+        od->has_lb_vip = lr_has_lb_vip(od);
+    }
 }
 
 static void
-destroy_lb_ips(struct ovn_datapath *od)
+destroy_lb_for_datapath(struct ovn_datapath *od)
 {
     if (!od->nbs && !od->nbr) {
         return;
@@ -895,7 +952,7 @@  ovn_datapath_destroy(struct hmap *datapaths, struct ovn_datapath *od)
         free(od->router_ports);
         destroy_nat_entries(od);
         destroy_router_enternal_ips(od);
-        destroy_lb_ips(od);
+        destroy_lb_for_datapath(od);
         free(od->nat_entries);
         free(od->localnet_ports);
         free(od->l3dgw_ports);
@@ -1224,7 +1281,7 @@  join_datapaths(struct northd_context *ctx, struct hmap *datapaths,
 
         init_ipam_info_for_datapath(od);
         init_mcast_info_for_datapath(od);
-        init_lb_ips(od);
+        init_lb_for_datapath(od);
     }
 
     const struct nbrec_logical_router *nbr;
@@ -1257,7 +1314,7 @@  join_datapaths(struct northd_context *ctx, struct hmap *datapaths,
         init_mcast_info_for_datapath(od);
         init_nat_entries(od);
         init_router_external_ips(od);
-        init_lb_ips(od);
+        init_lb_for_datapath(od);
         if (smap_get(&od->nbr->options, "chassis")) {
             od->is_gw_router = true;
         }
@@ -2590,7 +2647,7 @@  get_nat_addresses(const struct ovn_port *op, size_t *n, bool routable_only)
     size_t n_nats = 0;
     struct eth_addr mac;
     if (!op || !op->nbrp || !op->od || !op->od->nbr
-        || (!op->od->nbr->n_nat && !op->od->nbr->n_load_balancer)
+        || (!op->od->nbr->n_nat && !op->od->has_lb_vip)
         || !eth_addr_from_string(op->nbrp->mac, &mac)
         || op->od->n_l3dgw_ports > 1) {
         *n = n_nats;
@@ -3560,7 +3617,7 @@  build_ovn_lr_lbs(struct hmap *datapaths, struct hmap *lbs)
         }
         if (!smap_get(&od->nbr->options, "chassis")
             && od->n_l3dgw_ports != 1) {
-            if (od->n_l3dgw_ports > 1 && od->nbr->n_load_balancer) {
+            if (od->n_l3dgw_ports > 1 && od->has_lb_vip) {
                 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
                 VLOG_WARN_RL(&rl, "Load-balancers are configured on logical "
                              "router %s, which has %"PRIuSIZE" distributed "
@@ -3578,6 +3635,17 @@  build_ovn_lr_lbs(struct hmap *datapaths, struct hmap *lbs)
             lb = ovn_northd_lb_find(lbs, lb_uuid);
             ovn_northd_lb_add_lr(lb, od);
         }
+
+        for (size_t i = 0; i < od->nbr->n_load_balancer_group; i++) {
+            const struct nbrec_load_balancer_group *lbg =
+                od->nbr->load_balancer_group[i];
+            for (size_t j = 0; j < lbg->n_load_balancer; j++) {
+                const struct uuid *lb_uuid =
+                    &lbg->load_balancer[j]->header_.uuid;
+                lb = ovn_northd_lb_find(lbs, lb_uuid);
+                ovn_northd_lb_add_lr(lb, od);
+            }
+        }
     }
 }
 
@@ -3608,6 +3676,17 @@  build_ovn_lbs(struct northd_context *ctx, struct hmap *datapaths,
             lb = ovn_northd_lb_find(lbs, lb_uuid);
             ovn_northd_lb_add_ls(lb, od);
         }
+
+        for (size_t i = 0; i < od->nbs->n_load_balancer_group; i++) {
+            const struct nbrec_load_balancer_group *lbg =
+                od->nbs->load_balancer_group[i];
+            for (size_t j = 0; j < lbg->n_load_balancer; j++) {
+                const struct uuid *lb_uuid =
+                    &lbg->load_balancer[j]->header_.uuid;
+                lb = ovn_northd_lb_find(lbs, lb_uuid);
+                ovn_northd_lb_add_ls(lb, od);
+            }
+        }
     }
 
     /* Delete any stale SB load balancer rows. */
@@ -3716,6 +3795,26 @@  build_ovn_lb_svcs(struct northd_context *ctx, struct hmap *ports,
     hmap_destroy(&monitor_map);
 }
 
+static void
+build_lrouter_lb_ips(struct ovn_datapath *od, const struct ovn_northd_lb *lb)
+{
+    bool is_routable = smap_get_bool(&lb->nlb->options, "add_route", false);
+    const char *ip_address;
+
+    SSET_FOR_EACH (ip_address, &lb->ips_v4) {
+        sset_add(&od->lb_ips_v4, ip_address);
+        if (is_routable) {
+            sset_add(&od->lb_ips_v4_routable, ip_address);
+        }
+    }
+    SSET_FOR_EACH (ip_address, &lb->ips_v6) {
+        sset_add(&od->lb_ips_v6, ip_address);
+        if (is_routable) {
+            sset_add(&od->lb_ips_v6_routable, ip_address);
+        }
+    }
+}
+
 static void
 build_lrouter_lbs(struct hmap *datapaths, struct hmap *lbs)
 {
@@ -3730,20 +3829,17 @@  build_lrouter_lbs(struct hmap *datapaths, struct hmap *lbs)
             struct ovn_northd_lb *lb =
                 ovn_northd_lb_find(lbs,
                                    &od->nbr->load_balancer[i]->header_.uuid);
-            const char *ip_address;
-            bool is_routable = smap_get_bool(&lb->nlb->options, "add_route",
-                                             false);
-            SSET_FOR_EACH (ip_address, &lb->ips_v4) {
-                sset_add(&od->lb_ips_v4, ip_address);
-                if (is_routable) {
-                    sset_add(&od->lb_ips_v4_routable, ip_address);
-                }
-            }
-            SSET_FOR_EACH (ip_address, &lb->ips_v6) {
-                sset_add(&od->lb_ips_v6, ip_address);
-                if (is_routable) {
-                    sset_add(&od->lb_ips_v6_routable, ip_address);
-                }
+            build_lrouter_lb_ips(od, lb);
+        }
+
+        for (size_t i = 0; i < od->nbr->n_load_balancer_group; i++) {
+            const struct nbrec_load_balancer_group *lbg =
+                od->nbr->load_balancer_group[i];
+            for (size_t j = 0; j < lbg->n_load_balancer; j++) {
+                struct ovn_northd_lb *lb =
+                    ovn_northd_lb_find(lbs,
+                                       &lbg->load_balancer[j]->header_.uuid);
+                build_lrouter_lb_ips(od, lb);
             }
         }
     }
@@ -5551,22 +5647,8 @@  build_empty_lb_event_flow(struct ovn_lb_vip *lb_vip,
     return true;
 }
 
-static bool
-ls_has_lb_vip(struct ovn_datapath *od)
-{
-    for (int i = 0; i < od->nbs->n_load_balancer; i++) {
-        struct nbrec_load_balancer *nb_lb = od->nbs->load_balancer[i];
-        if (!smap_is_empty(&nb_lb->vips)) {
-            return true;
-        }
-    }
-
-    return false;
-}
-
 static void
-build_pre_lb(struct ovn_datapath *od, struct hmap *lflows,
-             struct hmap *lbs)
+build_pre_lb(struct ovn_datapath *od, struct hmap *lflows)
 {
     /* Do not send ND packets to conntrack */
     ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB, 110,
@@ -5601,49 +5683,41 @@  build_pre_lb(struct ovn_datapath *od, struct hmap *lflows,
                                  110, lflows);
     }
 
-    for (int i = 0; i < od->nbs->n_load_balancer; i++) {
-        struct nbrec_load_balancer *nb_lb = od->nbs->load_balancer[i];
-        struct ovn_northd_lb *lb =
-            ovn_northd_lb_find(lbs, &nb_lb->header_.uuid);
-        ovs_assert(lb);
-
-        /* 'REGBIT_CONNTRACK_NAT' is set to let the pre-stateful table send
-         * packet to conntrack for defragmentation and possibly for unNATting.
-         *
-         * Send all the packets to conntrack in the ingress pipeline if the
-         * logical switch has a load balancer with VIP configured. Earlier
-         * we used to set the REGBIT_CONNTRACK_DEFRAG flag in the ingress
-         * pipeline if the IP destination matches the VIP. But this causes
-         * few issues when a logical switch has no ACLs configured with
-         * allow-related.
-         * To understand the issue, lets a take a TCP load balancer -
-         * 10.0.0.10:80=10.0.0.3:80.
-         * If a logical port - p1 with IP - 10.0.0.5 opens a TCP connection
-         * with the VIP - 10.0.0.10, then the packet in the ingress pipeline
-         * of 'p1' is sent to the p1's conntrack zone id and the packet is
-         * load balanced to the backend - 10.0.0.3. For the reply packet from
-         * the backend lport, it is not sent to the conntrack of backend
-         * lport's zone id. This is fine as long as the packet is valid.
-         * Suppose the backend lport sends an invalid TCP packet (like
-         * incorrect sequence number), the packet gets * delivered to the
-         * lport 'p1' without unDNATing the packet to the VIP - 10.0.0.10.
-         * And this causes the connection to be reset by the lport p1's VIF.
-         *
-         * We can't fix this issue by adding a logical flow to drop ct.inv
-         * packets in the egress pipeline since it will drop all other
-         * connections not destined to the load balancers.
-         *
-         * To fix this issue, we send all the packets to the conntrack in the
-         * ingress pipeline if a load balancer is configured. We can now
-         * add a lflow to drop ct.inv packets.
-         */
-        if (lb->n_vips) {
-            ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB,
-                          100, "ip", REGBIT_CONNTRACK_NAT" = 1; next;");
-            ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_LB,
-                          100, "ip", REGBIT_CONNTRACK_NAT" = 1; next;");
-            break;
-        }
+    /* 'REGBIT_CONNTRACK_NAT' is set to let the pre-stateful table send
+     * packet to conntrack for defragmentation and possibly for unNATting.
+     *
+     * Send all the packets to conntrack in the ingress pipeline if the
+     * logical switch has a load balancer with VIP configured. Earlier
+     * we used to set the REGBIT_CONNTRACK_DEFRAG flag in the ingress
+     * pipeline if the IP destination matches the VIP. But this causes
+     * a few issues when a logical switch has no ACLs configured with
+     * allow-related.
+     * To understand the issue, let's take a TCP load balancer -
+     * 10.0.0.10:80=10.0.0.3:80.
+     * If a logical port - p1 with IP - 10.0.0.5 opens a TCP connection
+     * with the VIP - 10.0.0.10, then the packet in the ingress pipeline
+     * of 'p1' is sent to the p1's conntrack zone id and the packet is
+     * load balanced to the backend - 10.0.0.3. For the reply packet from
+     * the backend lport, it is not sent to the conntrack of backend
+     * lport's zone id. This is fine as long as the packet is valid.
+     * Suppose the backend lport sends an invalid TCP packet (like an
+     * incorrect sequence number), the packet gets delivered to the
+     * lport 'p1' without unDNATing the packet to the VIP - 10.0.0.10.
+     * And this causes the connection to be reset by the lport p1's VIF.
+     *
+     * We can't fix this issue by adding a logical flow to drop ct.inv
+     * packets in the egress pipeline since it will drop all other
+     * connections not destined to the load balancers.
+     *
+     * To fix this issue, we send all the packets to the conntrack in the
+     * ingress pipeline if a load balancer is configured. We can now
+     * add a lflow to drop ct.inv packets.
+     */
+    if (od->has_lb_vip) {
+        ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_LB,
+                      100, "ip", REGBIT_CONNTRACK_NAT" = 1; next;");
+        ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_LB,
+                      100, "ip", REGBIT_CONNTRACK_NAT" = 1; next;");
     }
 }
 
@@ -7325,15 +7399,13 @@  static void
 build_lswitch_lflows_pre_acl_and_acl(struct ovn_datapath *od,
                                      struct hmap *port_groups,
                                      struct hmap *lflows,
-                                     struct shash *meter_groups,
-                                     struct hmap *lbs)
+                                     struct shash *meter_groups)
 {
     if (od->nbs) {
-        od->has_lb_vip = ls_has_lb_vip(od);
         ls_get_acl_flags(od);
 
         build_pre_acls(od, port_groups, lflows);
-        build_pre_lb(od, lflows, lbs);
+        build_pre_lb(od, lflows);
         build_pre_stateful(od, lflows);
         build_acl_hints(od, lflows);
         build_acls(od, lflows, port_groups, meter_groups);
@@ -12583,7 +12655,7 @@  build_lrouter_nat_defrag_and_lb(struct ovn_datapath *od, struct hmap *lflows,
      * flag set. Some NICs are unable to offload these flows.
      */
     if ((od->is_gw_router || od->n_l3dgw_ports) &&
-        (od->nbr->n_nat || od->nbr->n_load_balancer)) {
+        (od->nbr->n_nat || od->has_lb_vip)) {
         ovn_lflow_add(lflows, od, S_ROUTER_OUT_UNDNAT, 50,
                       "ip", "flags.loopback = 1; ct_dnat;");
         ovn_lflow_add(lflows, od, S_ROUTER_OUT_POST_UNDNAT, 50,
@@ -12801,7 +12873,7 @@  build_lswitch_and_lrouter_iterate_by_od(struct ovn_datapath *od,
 {
     /* Build Logical Switch Flows. */
     build_lswitch_lflows_pre_acl_and_acl(od, lsi->port_groups, lsi->lflows,
-                                         lsi->meter_groups, lsi->lbs);
+                                         lsi->meter_groups);
 
     build_fwd_group_lflows(od, lsi->lflows);
     build_lswitch_lflows_admission_control(od, lsi->lflows);
diff --git a/ovn-nb.ovsschema b/ovn-nb.ovsschema
index 2ac8ef3ea..5dee04fe9 100644
--- a/ovn-nb.ovsschema
+++ b/ovn-nb.ovsschema
@@ -1,7 +1,7 @@ 
 {
     "name": "OVN_Northbound",
-    "version": "5.32.1",
-    "cksum": "2805328215 29734",
+    "version": "5.33.1",
+    "cksum": "1931852754 30731",
     "tables": {
         "NB_Global": {
             "columns": {
@@ -61,6 +61,11 @@ 
                                                   "refType": "weak"},
                                            "min": 0,
                                            "max": "unlimited"}},
+                "load_balancer_group": {
+                    "type": {"key": {"type": "uuid",
+                                     "refTable": "Load_Balancer_Group"},
+                             "min": 0,
+                             "max": "unlimited"}},
                 "dns_records": {"type": {"key": {"type": "uuid",
                                          "refTable": "DNS",
                                          "refType": "weak"},
@@ -208,6 +213,16 @@ 
                     "type": {"key": "string", "value": "string",
                              "min": 0, "max": "unlimited"}}},
             "isRoot": true},
+        "Load_Balancer_Group": {
+            "columns": {
+                "name": {"type": "string"},
+                "load_balancer": {"type": {"key": {"type": "uuid",
+                                                   "refTable": "Load_Balancer",
+                                                   "refType": "weak"},
+                                 "min": 0,
+                                 "max": "unlimited"}}},
+            "indexes": [["name"]],
+            "isRoot": true},
         "Load_Balancer_Health_Check": {
             "columns": {
                 "vip": {"type": "string"},
@@ -336,6 +351,11 @@ 
                                                   "refType": "weak"},
                                            "min": 0,
                                            "max": "unlimited"}},
+                "load_balancer_group": {
+                    "type": {"key": {"type": "uuid",
+                                     "refTable": "Load_Balancer_Group"},
+                             "min": 0,
+                             "max": "unlimited"}},
                 "copp": {"type": {"key": {"type": "uuid", "refTable": "Copp",
                                           "refType": "weak"},
                                   "min": 0, "max": 1}},
diff --git a/ovn-nb.xml b/ovn-nb.xml
index 390cc5a44..93e358f13 100644
--- a/ovn-nb.xml
+++ b/ovn-nb.xml
@@ -450,8 +450,11 @@ 
     </column>
 
     <column name="load_balancer">
-      Load balance a virtual ip address to a set of logical port endpoint
-      ip addresses.
+      Set of load balancers associated to this logical switch.
+    </column>
+
+    <column name="load_balancer_group">
+      Set of load balancer groups associated to this logical switch.
     </column>
 
     <column name="acls">
@@ -1812,6 +1815,26 @@ 
     </group>
   </table>
 
+  <table name="Load_Balancer_Group" title="load balancer group">
+    <p>
+      Each row represents a logical grouping of load balancers.  It is up to
+      the CMS to decide the criteria on which load balancers are grouped
+      together.  To simplify configuration and to optimize processing,
+      load balancers that must be associated to the same set of logical
+      switches and/or logical routers should be grouped together.
+    </p>
+
+    <column name="name">
+      A name for the load balancer group.  This name has no special meaning or
+      purpose other than to provide convenience for human interaction with
+      the ovn-nb database.
+    </column>
+
+    <column name="load_balancer">
+      A set of load balancers.
+    </column>
+  </table>
+
   <table name="Load_Balancer_Health_Check" title="load balancer">
     <p>
       Each row represents one load balancer health check. Health checks
@@ -2057,9 +2080,13 @@ 
     </column>
 
     <column name="load_balancer">
-      Load balance a virtual ip address to a set of logical port ip
-      addresses.  Load balancer rules only work on the Gateway routers or
-      routers with one and only one distributed gateway port.
+      Set of load balancers associated to this logical router.  Load balancer
+      rules only work on the Gateway routers or routers with one and only
+      one distributed gateway port.
+    </column>
+
+    <column name="load_balancer_group">
+      Set of load balancer groups associated to this logical router.
     </column>
 
     <group title="Naming">
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index 66cd03bf2..166723502 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -1438,7 +1438,40 @@  check ovn-nbctl --wait=sb lr-nat-add lr0 dnat 192.168.2.5 10.0.0.5
 ovn-sbctl dump-flows lr0 > sbflows
 AT_CAPTURE_FILE([sbflows])
 
-# There shoule be no flows for LB VIPs in lr_in_unsnat if the VIP is not a
+# There should be no flows for LB VIPs in lr_in_unsnat if the VIP is not a
+# dnat_and_snat or snat entry.
+AT_CHECK([grep "lr_in_unsnat" sbflows | sort], [0], [dnl
+  table=4 (lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
+  table=4 (lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.1 && tcp && tcp.dst == 8080), action=(next;)
+  table=4 (lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.4 && udp && udp.dst == 8080), action=(next;)
+  table=4 (lr_in_unsnat       ), priority=120  , match=(ip4 && ip4.dst == 192.168.2.5 && tcp && tcp.dst == 8080), action=(next;)
+  table=4 (lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 192.168.2.1), action=(ct_snat;)
+  table=4 (lr_in_unsnat       ), priority=90   , match=(ip && ip4.dst == 192.168.2.4), action=(ct_snat;)
+])
+
+AS_BOX([Check behavior with LB Groups])
+check ovn-nbctl lr-lb-del lr0 lb1
+check ovn-nbctl lr-lb-del lr0 lb2
+check ovn-nbctl lr-lb-del lr0 lb3
+check ovn-nbctl lr-lb-del lr0 lb4
+
+lb1=$(fetch_column nb:load_balancer _uuid name=lb1)
+lb2=$(fetch_column nb:load_balancer _uuid name=lb2)
+lb3=$(fetch_column nb:load_balancer _uuid name=lb3)
+lb4=$(fetch_column nb:load_balancer _uuid name=lb4)
+
+lbg=$(ovn-nbctl create load_balancer_group name=lbg -- \
+  add load_balancer_group lbg load_balancer $lb1 -- \
+  add load_balancer_group lbg load_balancer $lb2 -- \
+  add load_balancer_group lbg load_balancer $lb3 -- \
+  add load_balancer_group lbg load_balancer $lb4)
+
+check ovn-nbctl --wait=sb add logical_router lr0 load_balancer_group $lbg
+
+ovn-sbctl dump-flows lr0 > sbflows
+AT_CAPTURE_FILE([sbflows])
+
+# There should be no flows for LB VIPs in lr_in_unsnat if the VIP is not a
 # dnat_and_snat or snat entry.
 AT_CHECK([grep "lr_in_unsnat" sbflows | sort], [0], [dnl
   table=4 (lr_in_unsnat       ), priority=0    , match=(1), action=(next;)
@@ -1857,53 +1890,79 @@  ovn-nbctl lsp-add sw0 sw0-p1
 ovn-nbctl lb-add lb1 "10.0.0.10" "10.0.0.3"
 ovn-nbctl lb-add lb2 "10.0.0.11" "10.0.0.4"
 
+ovn-nbctl lb-add lb3 "10.0.0.12" "10.0.0.5"
+ovn-nbctl lb-add lb4 "10.0.0.13" "10.0.0.6"
+
+lb1=$(fetch_column nb:load_balancer _uuid name=lb1)
+lb2=$(fetch_column nb:load_balancer _uuid name=lb2)
+lb3=$(fetch_column nb:load_balancer _uuid name=lb3)
+lb4=$(fetch_column nb:load_balancer _uuid name=lb4)
+
+lbg=$(ovn-nbctl create load_balancer_group name=lbg)
+check ovn-nbctl add logical_switch sw0 load_balancer_group $lbg
+
 ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
 ])
 
-ovn-nbctl ls-lb-add sw0 lb1
-ovn-nbctl --wait=sb sync
+check ovn-nbctl ls-lb-add sw0 lb1
+check ovn-nbctl add load_balancer_group $lbg load_balancer $lb3
+check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
   table=0 (ls_out_pre_lb      ), priority=100  , match=(ip), action=(reg0[[2]] = 1; next;)
 ])
 
-ovn-nbctl ls-lb-add sw0 lb2
-ovn-nbctl --wait=sb sync
+check ovn-nbctl ls-lb-add sw0 lb2
+check ovn-nbctl add load_balancer_group $lbg load_balancer $lb4
+check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
   table=0 (ls_out_pre_lb      ), priority=100  , match=(ip), action=(reg0[[2]] = 1; next;)
 ])
 
-lb1_uuid=$(ovn-nbctl --bare --columns _uuid find load_balancer name=lb1)
-lb2_uuid=$(ovn-nbctl --bare --columns _uuid find load_balancer name=lb2)
+check ovn-nbctl clear load_balancer $lb1 vips
+check ovn-nbctl clear load_balancer $lb3 vips
+check ovn-nbctl --wait=sb sync
+AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
+  table=0 (ls_out_pre_lb      ), priority=100  , match=(ip), action=(reg0[[2]] = 1; next;)
+])
 
-ovn-nbctl clear load_balancer $lb1_uuid vips
-ovn-nbctl --wait=sb sync
+check ovn-nbctl clear load_balancer $lb2 vips
+check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
   table=0 (ls_out_pre_lb      ), priority=100  , match=(ip), action=(reg0[[2]] = 1; next;)
 ])
 
-ovn-nbctl clear load_balancer $lb2_uuid vips
-ovn-nbctl --wait=sb sync
+check ovn-nbctl clear load_balancer $lb4 vips
+check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
 ])
 
-ovn-nbctl set load_balancer $lb1_uuid vips:"10.0.0.10"="10.0.0.3"
-ovn-nbctl set load_balancer $lb2_uuid vips:"10.0.0.11"="10.0.0.4"
+check ovn-nbctl set load_balancer $lb1 vips:"10.0.0.10"="10.0.0.3"
+check ovn-nbctl set load_balancer $lb2 vips:"10.0.0.11"="10.0.0.4"
+check ovn-nbctl set load_balancer $lb3 vips:"10.0.0.12"="10.0.0.5"
+check ovn-nbctl set load_balancer $lb4 vips:"10.0.0.13"="10.0.0.6"
 
-ovn-nbctl --wait=sb sync
+check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
   table=0 (ls_out_pre_lb      ), priority=100  , match=(ip), action=(reg0[[2]] = 1; next;)
 ])
 
 # Now reverse the order of clearing the vip.
-ovn-nbctl clear load_balancer $lb2_uuid vips
-ovn-nbctl --wait=sb sync
+check ovn-nbctl clear load_balancer $lb2 vips
+check ovn-nbctl clear load_balancer $lb4 vips
+check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
   table=0 (ls_out_pre_lb      ), priority=100  , match=(ip), action=(reg0[[2]] = 1; next;)
 ])
 
-ovn-nbctl clear load_balancer $lb1_uuid vips
-ovn-nbctl --wait=sb sync
+check ovn-nbctl clear load_balancer $lb1 vips
+check ovn-nbctl --wait=sb sync
+AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
+  table=0 (ls_out_pre_lb      ), priority=100  , match=(ip), action=(reg0[[2]] = 1; next;)
+])
+
+check ovn-nbctl clear load_balancer $lb3 vips
+check ovn-nbctl --wait=sb sync
 AT_CHECK([ovn-sbctl lflow-list | grep "ls_out_pre_lb.*priority=100" | grep reg0 | sort], [0], [dnl
 ])
 
@@ -2345,23 +2404,29 @@  OVN_FOR_EACH_NORTHD([
 AT_SETUP([NB to SB load balancer sync])
 ovn_start
 
-check ovn-nbctl --wait=sb lb-add lb0 10.0.0.10:80 10.0.0.4:8080
-check_row_count nb:load_balancer 1
+check ovn-nbctl lb-add lb0 10.0.0.10:80 10.0.0.4:8080
+check ovn-nbctl --wait=sb lb-add lbg0 20.0.0.10:80 20.0.0.4:8080
+check_row_count nb:load_balancer 2
 
 echo
 echo "__file__:__line__: Check that there are no SB load balancer rows."
 check_row_count sb:load_balancer 0
 
-check ovn-nbctl ls-add sw0
+lbg0=$(fetch_column nb:load_balancer _uuid name=lbg0)
+lbg=$(ovn-nbctl create load_balancer_group name=lbg)
+check ovn-nbctl add load_balancer_group $lbg load_balancer $lbg0
+check ovn-nbctl ls-add sw0 -- add logical_switch sw0 load_balancer_group $lbg
 check ovn-nbctl --wait=sb ls-lb-add sw0 lb0
 sw0_sb_uuid=$(fetch_column datapath_binding _uuid external_ids:name=sw0)
 
 echo
-echo "__file__:__line__: Check that there is one SB load balancer row for lb0."
-check_row_count sb:load_balancer 1
+echo "__file__:__line__: Check that there is one SB load balancer row for lb0 and one for lbg0"
+check_row_count sb:load_balancer 2
 check_column "10.0.0.10:80=10.0.0.4:8080 tcp" sb:load_balancer vips,protocol name=lb0
+check_column "20.0.0.10:80=20.0.0.4:8080 tcp" sb:load_balancer vips,protocol name=lbg0
 
 lb0_uuid=$(fetch_column sb:load_balancer _uuid name=lb0)
+lbg0_uuid=$(fetch_column sb:load_balancer _uuid name=lbg0)
 
 echo
 echo "__file__:__line__: Check that SB lb0 has sw0 in datapaths column."
@@ -2369,7 +2434,13 @@  echo "__file__:__line__: Check that SB lb0 has sw0 in datapaths column."
 check_column "$sw0_sb_uuid" sb:load_balancer datapaths name=lb0
 check_column "" sb:datapath_binding load_balancers external_ids:name=sw0
 
-check ovn-nbctl --wait=sb set load_balancer . vips:"10.0.0.20\:90"="20.0.0.4:8080,30.0.0.4:8080"
+echo
+echo "__file__:__line__: Check that SB lbg0 has sw0 in datapaths column."
+
+check_column "$sw0_sb_uuid" sb:load_balancer datapaths name=lbg0
+check_column "" sb:datapath_binding load_balancers external_ids:name=sw0
+
+check ovn-nbctl --wait=sb set load_balancer lb0 vips:"10.0.0.20\:90"="20.0.0.4:8080,30.0.0.4:8080"
 
 echo
 echo "__file__:__line__: Check that SB lb0 has vips and protocol columns are set properly."
@@ -2377,38 +2448,61 @@  echo "__file__:__line__: Check that SB lb0 has vips and protocol columns are set
 check_column "10.0.0.10:80=10.0.0.4:8080 10.0.0.20:90=20.0.0.4:8080,30.0.0.4:8080 tcp" \
 sb:load_balancer vips,protocol name=lb0
 
-check ovn-nbctl lr-add lr0
+check ovn-nbctl --wait=sb set load_balancer lbg0 vips:"20.0.0.20\:90"="20.0.0.4:8080,30.0.0.4:8080"
+
+echo
+echo "__file__:__line__: Check that SB lbg0 has vips and protocol columns are set properly."
+
+check_column "20.0.0.10:80=20.0.0.4:8080 20.0.0.20:90=20.0.0.4:8080,30.0.0.4:8080 tcp" \
+sb:load_balancer vips,protocol name=lbg0
+
+check ovn-nbctl lr-add lr0 -- add logical_router lr0 load_balancer_group $lbg
 check ovn-nbctl --wait=sb lr-lb-add lr0 lb0
 
 echo
 echo "__file__:__line__: Check that SB lb0 has only sw0 in datapaths column."
 check_column "$sw0_sb_uuid" sb:load_balancer datapaths name=lb0
 
-check ovn-nbctl ls-add sw1
+echo
+echo "__file__:__line__: Check that SB lbg0 has only sw0 in datapaths column."
+check_column "$sw0_sb_uuid" sb:load_balancer datapaths name=lbg0
+
+check ovn-nbctl ls-add sw1 -- add logical_switch sw1 load_balancer_group $lbg
 check ovn-nbctl --wait=sb ls-lb-add sw1 lb0
 sw1_sb_uuid=$(fetch_column datapath_binding _uuid external_ids:name=sw1)
 
 echo
 echo "__file__:__line__: Check that SB lb0 has sw0 and sw1 in datapaths column."
 check_column "$sw0_sb_uuid $sw1_sb_uuid" sb:load_balancer datapaths name=lb0
+
+echo
+echo "__file__:__line__: Check that SB lbg0 has sw0 and sw1 in datapaths column."
+check_column "$sw0_sb_uuid $sw1_sb_uuid" sb:load_balancer datapaths name=lbg0
 check_column "" sb:datapath_binding load_balancers external_ids:name=sw1
 
 check ovn-nbctl --wait=sb lb-add lb1 10.0.0.30:80 20.0.0.50:8080 udp
-check_row_count sb:load_balancer 1
+check ovn-nbctl --wait=sb lb-add lbg1 20.0.0.30:80 20.0.0.50:8080 udp
+check_row_count sb:load_balancer 2
 
+lbg1=$(fetch_column nb:load_balancer _uuid name=lbg1)
+check ovn-nbctl add load_balancer_group $lbg load_balancer $lbg1
 check ovn-nbctl --wait=sb lr-lb-add lr0 lb1
-check_row_count sb:load_balancer 1
+check_row_count sb:load_balancer 3
 
 echo
 echo "__file__:__line__: Associate lb1 to sw1 and check that lb1 is created in SB DB."
 
 check ovn-nbctl --wait=sb ls-lb-add sw1 lb1
-check_row_count sb:load_balancer 2
+check_row_count sb:load_balancer 4
 
 echo
 echo "__file__:__line__: Check that SB lb1 has vips and protocol columns are set properly."
 check_column "10.0.0.30:80=20.0.0.50:8080 udp" sb:load_balancer vips,protocol name=lb1
 
+echo
+echo "__file__:__line__: Check that SB lbg1 has vips and protocol columns are set properly."
+check_column "20.0.0.30:80=20.0.0.50:8080 udp" sb:load_balancer vips,protocol name=lbg1
+
 lb1_uuid=$(fetch_column sb:load_balancer _uuid name=lb1)
 
 echo
@@ -2416,20 +2510,26 @@  echo "__file__:__line__: Check that SB lb1 has sw1 in datapaths column."
 
 check_column "$sw1_sb_uuid" sb:load_balancer datapaths name=lb1
 
+lbg1_uuid=$(fetch_column sb:load_balancer _uuid name=lbg1)
+
+echo
+echo "__file__:__line__: Check that SB lbg1 has sw0 and sw1 in datapaths column."
+
+check_column "$sw0_sb_uuid $sw1_sb_uuid" sb:load_balancer datapaths name=lbg1
+
 echo
 echo "__file__:__line__: check that datapath sw1 has no entry in the load_balancers column."
 check_column "" sb:datapath_binding load_balancers external_ids:name=sw1
 
-
 echo
 echo "__file__:__line__: Set hairpin_snat_ip on lb1 and check that SB DB is updated."
 check ovn-nbctl --wait=sb set Load_Balancer lb1 options:hairpin_snat_ip="42.42.42.42 4242::4242"
 check_column "$lb1_uuid" sb:load_balancer _uuid name=lb1 options='{hairpin_orig_tuple="true", hairpin_snat_ip="42.42.42.42 4242::4242"}'
 
 echo
-echo "__file__:__line__: Delete load balancer lb1 and check that datapath sw1's load_balancers is still empty."
+echo "__file__:__line__: Delete load balancers lb1 and lbg1 and check that datapath sw1's load_balancers is still empty."
 
-ovn-nbctl --wait=sb lb-del lb1
+ovn-nbctl --wait=sb lb-del lb1 -- lb-del lbg1
 check_column "" sb:datapath_binding load_balancers external_ids:name=sw1
 AT_CLEANUP
 ])
@@ -2438,11 +2538,52 @@  OVN_FOR_EACH_NORTHD([
 AT_SETUP([LS load balancer hairpin logical flows])
 ovn_start
 
+lbg=$(ovn-nbctl create load_balancer_group name=lbg)
+
 check ovn-nbctl \
-    -- ls-add sw0 \
-    -- lb-add lb0 10.0.0.10:80 10.0.0.4:8080 \
+    -- lb-add lb0 10.0.0.10:80 10.0.0.4:8080
+
+lb0=$(fetch_column nb:load_balancer _uuid name=lb0)
+
+check ovn-nbctl \
+    -- ls-add sw0 \
+    -- add logical_switch sw0 load_balancer_group $lbg \
     -- ls-lb-add sw0 lb0
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_pre_hairpin | sort], [0], [dnl
+  table=13(ls_in_pre_hairpin  ), priority=0    , match=(1), action=(next;)
+  table=13(ls_in_pre_hairpin  ), priority=100  , match=(ip && ct.trk), action=(reg0[[6]] = chk_lb_hairpin(); reg0[[12]] = chk_lb_hairpin_reply(); next;)
+])
 
+AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_nat_hairpin | sort], [0], [dnl
+  table=14(ls_in_nat_hairpin  ), priority=0    , match=(1), action=(next;)
+  table=14(ls_in_nat_hairpin  ), priority=100  , match=(ip && ct.est && ct.trk && reg0[[6]] == 1), action=(ct_snat;)
+  table=14(ls_in_nat_hairpin  ), priority=100  , match=(ip && ct.new && ct.trk && reg0[[6]] == 1), action=(ct_snat_to_vip; next;)
+  table=14(ls_in_nat_hairpin  ), priority=90   , match=(ip && reg0[[12]] == 1), action=(ct_snat;)
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_hairpin | sort], [0], [dnl
+  table=15(ls_in_hairpin      ), priority=0    , match=(1), action=(next;)
+  table=15(ls_in_hairpin      ), priority=1    , match=((reg0[[6]] == 1 || reg0[[12]] == 1)), action=(eth.dst <-> eth.src; outport = inport; flags.loopback = 1; output;)
+])
+
+check ovn-nbctl -- ls-lb-del sw0 lb0
+check ovn-nbctl --wait=sb sync
+
+AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_pre_hairpin | sort], [0], [dnl
+  table=13(ls_in_pre_hairpin  ), priority=0    , match=(1), action=(next;)
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_nat_hairpin | sort], [0], [dnl
+  table=14(ls_in_nat_hairpin  ), priority=0    , match=(1), action=(next;)
+])
+
+AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_hairpin | sort], [0], [dnl
+  table=15(ls_in_hairpin      ), priority=0    , match=(1), action=(next;)
+])
+
+check ovn-nbctl -- add load_balancer_group $lbg load_balancer $lb0
 check ovn-nbctl --wait=sb sync
 
 AT_CHECK([ovn-sbctl lflow-list sw0 | grep ls_in_pre_hairpin | sort], [0], [dnl
@@ -3254,8 +3395,14 @@  check ovn-nbctl lsp-set-type public-lr0 router
 check ovn-nbctl lsp-set-addresses public-lr0 router
 check ovn-nbctl lsp-set-options public-lr0 router-port=lr0-public
 
+lbg=$(ovn-nbctl create load_balancer_group name=lbg)
+
 check ovn-nbctl lb-add lb1 10.0.0.10:80 10.0.0.4:8080
+check ovn-nbctl lb-add lbg1 10.0.0.100:80 10.0.0.40:8080
+lbg1=$(fetch_column nb:load_balancer _uuid name=lbg1)
+check ovn-nbctl add load_balancer_group $lbg load_balancer $lbg1
 check ovn-nbctl lr-lb-add lr0 lb1
+check ovn-nbctl add logical_router lr0 load_balancer_group $lbg
 check ovn-nbctl set logical_router lr0 options:chassis=ch1
 
 check ovn-nbctl --wait=sb sync
@@ -3270,12 +3417,15 @@  AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
   table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=6 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_label.natted == 1), action=(next;)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_label.natted == 1), action=(next;)
   table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(ct_lb(backends=10.0.0.4:8080);)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(ct_lb(backends=10.0.0.40:8080);)
 ])
 
 AT_CHECK([grep "lr_out_undnat" lr0flows | sort], [0], [dnl
@@ -3303,12 +3453,15 @@  AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
   table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=6 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_label.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_label.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
   table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb(backends=10.0.0.4:8080);)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb(backends=10.0.0.40:8080);)
 ])
 
 AT_CHECK([grep "lr_out_snat" lr0flows | sort], [0], [dnl
@@ -3346,12 +3499,15 @@  AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
   table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=6 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_label.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_label.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
   table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb(backends=10.0.0.4:8080);)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb(backends=10.0.0.40:8080);)
 ])
 
 AT_CHECK([grep "lr_out_snat" lr0flows | sort], [0], [dnl
@@ -3403,12 +3559,15 @@  AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
   table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.10 && tcp), action=(reg0 = 10.0.0.10; reg9[[16..31]] = tcp.dst; ct_dnat;)
+  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
 ])
 
 AT_CHECK([grep "lr_in_dnat" lr0flows | sort], [0], [dnl
   table=6 (lr_in_dnat         ), priority=0    , match=(1), action=(next;)
   table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80 && ct_label.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.est && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80 && ct_label.natted == 1), action=(flags.force_snat_for_lb = 1; next;)
   table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip4 && reg0 == 10.0.0.10 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb(backends=10.0.0.4:8080);)
+  table=6 (lr_in_dnat         ), priority=120  , match=(ct.new && ip4 && reg0 == 10.0.0.100 && tcp && reg9[[16..31]] == 80), action=(flags.force_snat_for_lb = 1; ct_lb(backends=10.0.0.40:8080);)
 ])
 
 AT_CHECK([grep "lr_out_snat" lr0flows | sort], [0], [dnl
@@ -3446,6 +3605,7 @@  AT_CHECK([grep "lr_in_unsnat" lr0flows | sort], [0], [dnl
 
 AT_CHECK([grep "lr_in_defrag" lr0flows | sort], [0], [dnl
   table=5 (lr_in_defrag       ), priority=0    , match=(1), action=(next;)
+  table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.100 && tcp), action=(reg0 = 10.0.0.100; reg9[[16..31]] = tcp.dst; ct_dnat;)
   table=5 (lr_in_defrag       ), priority=110  , match=(ip && ip4.dst == 10.0.0.20 && tcp), action=(reg0 = 10.0.0.20; reg9[[16..31]] = tcp.dst; ct_dnat;)
 ])
 
@@ -3577,10 +3737,17 @@  OVN_FOR_EACH_NORTHD([
 AT_SETUP([LS load balancer logical flows])
 ovn_start
 
+lbg=$(ovn-nbctl create load_balancer_group name=lbg)
 check ovn-nbctl \
-    -- ls-add sw0 \
     -- lb-add lb0 10.0.0.10:80 10.0.0.4:8080 \
-    -- ls-lb-add sw0 lb0
+    -- lb-add lbg0 10.0.0.20:80 10.0.0.40:8080
+lbg0=$(fetch_column nb:load_balancer _uuid name=lbg0)
+
+check ovn-nbctl \
+    -- ls-add sw0 \
+    -- add logical_switch sw0 load_balancer_group $lbg \
+    -- ls-lb-add sw0 lb0 \
+    -- add load_balancer_group $lbg load_balancer $lbg0
 
 check ovn-nbctl lr-add lr0
 check ovn-nbctl lrp-add lr0 lr0-sw0 00:00:00:00:ff:01 10.0.0.1/24
@@ -3621,6 +3788,7 @@  check_stateful_flows() {
   table=12(ls_in_stateful     ), priority=100  , match=(reg0[[1]] == 1 && reg0[[13]] == 0), action=(ct_commit { ct_label.blocked = 0; }; next;)
   table=12(ls_in_stateful     ), priority=100  , match=(reg0[[1]] == 1 && reg0[[13]] == 1), action=(ct_commit { ct_label.blocked = 0; ct_label.label = reg3; }; next;)
   table=12(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.10 && tcp.dst == 80), action=(reg1 = 10.0.0.10; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.4:8080);)
+  table=12(ls_in_stateful     ), priority=120  , match=(ct.new && ip4.dst == 10.0.0.20 && tcp.dst == 80), action=(reg1 = 10.0.0.20; reg2[[0..15]] = 80; ct_lb(backends=10.0.0.40:8080);)
 ])
 
     AT_CHECK([grep "ls_out_pre_lb" sw0flows | sort], [0], [dnl
@@ -3654,8 +3822,10 @@  check ovn-nbctl --wait=sb acl-add sw0 to-lport 1002 "ip4 && tcp && tcp.src == 80
 
 check_stateful_flows
 
-# Remove load balancer from sw0
-check ovn-nbctl --wait=sb ls-lb-del sw0 lb0
+# Remove load balancers from sw0
+check ovn-nbctl ls-lb-del sw0 lb0
+check ovn-nbctl clear logical_switch sw0 load_balancer_group
+check ovn-nbctl --wait=sb sync
 
 ovn-sbctl dump-flows sw0 > sw0flows
 AT_CAPTURE_FILE([sw0flows])
diff --git a/utilities/ovn-nbctl.c b/utilities/ovn-nbctl.c
index e34bb65f7..b6f93e0a5 100644
--- a/utilities/ovn-nbctl.c
+++ b/utilities/ovn-nbctl.c
@@ -6803,6 +6803,9 @@  static const struct ctl_table_class tables[NBREC_N_TABLES] = {
     [NBREC_TABLE_LOAD_BALANCER].row_ids[0]
     = {&nbrec_load_balancer_col_name, NULL, NULL},
 
+    [NBREC_TABLE_LOAD_BALANCER_GROUP].row_ids[0]
+    = {&nbrec_load_balancer_group_col_name, NULL, NULL},
+
     [NBREC_TABLE_LOAD_BALANCER_HEALTH_CHECK].row_ids[0]
     = {&nbrec_load_balancer_health_check_col_vip, NULL, NULL},