diff mbox series

[ovs-dev] northd: add the capability to inherit logical routers lbs groups on logical switches

Message ID 1326d566a659628a5118cf47160cb8244e31c648.1663159274.git.lorenzo.bianconi@redhat.com
State Changes Requested
Headers show
Series [ovs-dev] northd: add the capability to inherit logical routers lbs groups on logical switches | expand

Checks

Context Check Description
ovsrobot/apply-robot fail apply and check: fail

Commit Message

Lorenzo Bianconi Sept. 14, 2022, 12:42 p.m. UTC
Similar to single load balancers, add the capability to automatically
deploy a load-balancer group on each logical-switch connected to a
logical router where the load-balancer group has been installed by the
CMS.

Reported-at: https://bugzilla.redhat.com/show_bug.cgi?id=2125310
Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
---
 lib/lb.c            |  5 +++++
 lib/lb.h            |  3 +++
 northd/northd.c     | 41 +++++++++++++++++++++++++----------------
 tests/ovn-northd.at |  8 ++++++++
 4 files changed, 41 insertions(+), 16 deletions(-)

Comments

Mark Michelson Oct. 18, 2022, 7:35 p.m. UTC | #1
Hi Lorenzo, thanks for the enhancement!

Acked-by: Mark Michelson <mmichels@redhat.com>

On 9/14/22 08:42, Lorenzo Bianconi wrote:
> Similar to single load balancers, add the capability to automatically
> deploy a load-balancer group on each logical-switch connected to a
> logical router where the load-balancer group has been installed by the
> CMS.
> 
> Reported-at: https://bugzilla.redhat.com/show_bug.cgi?id=2125310
> Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
> ---
>   lib/lb.c            |  5 +++++
>   lib/lb.h            |  3 +++
>   northd/northd.c     | 41 +++++++++++++++++++++++++----------------
>   tests/ovn-northd.at |  8 ++++++++
>   4 files changed, 41 insertions(+), 16 deletions(-)
> 
> diff --git a/lib/lb.c b/lib/lb.c
> index fa1a66d82..895828a09 100644
> --- a/lib/lb.c
> +++ b/lib/lb.c
> @@ -182,6 +182,7 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
>                                            : LB_NEIGH_RESPOND_ALL;
>       sset_init(&lb->ips_v4);
>       sset_init(&lb->ips_v6);
> +    sset_init(&lb->ods);
>       struct smap_node *node;
>       size_t n_vips = 0;
>   
> @@ -280,6 +281,7 @@ ovn_northd_lb_destroy(struct ovn_northd_lb *lb)
>       free(lb->vips_nb);
>       sset_destroy(&lb->ips_v4);
>       sset_destroy(&lb->ips_v6);
> +    sset_destroy(&lb->ods);
>       free(lb->selection_fields);
>       free(lb->nb_lr);
>       free(lb->nb_ls);
> @@ -304,6 +306,8 @@ ovn_lb_group_create(const struct nbrec_load_balancer_group *nbrec_lb_group,
>       lb_group->ls = xmalloc(max_datapaths * sizeof *lb_group->ls);
>       lb_group->lr = xmalloc(max_datapaths * sizeof *lb_group->lr);
>   
> +    sset_init(&lb_group->ods);
> +
>       for (size_t i = 0; i < nbrec_lb_group->n_load_balancer; i++) {
>           const struct uuid *lb_uuid =
>               &nbrec_lb_group->load_balancer[i]->header_.uuid;
> @@ -320,6 +324,7 @@ ovn_lb_group_destroy(struct ovn_lb_group *lb_group)
>           return;
>       }
>   
> +    sset_destroy(&lb_group->ods);
>       free(lb_group->lbs);
>       free(lb_group->ls);
>       free(lb_group->lr);
> diff --git a/lib/lb.h b/lib/lb.h
> index e0b153fb3..58530e222 100644
> --- a/lib/lb.h
> +++ b/lib/lb.h
> @@ -55,6 +55,7 @@ struct ovn_northd_lb {
>   
>       struct sset ips_v4;
>       struct sset ips_v6;
> +    struct sset ods;
>   
>       size_t n_nb_ls;
>       size_t n_allocated_nb_ls;
> @@ -118,6 +119,8 @@ struct ovn_lb_group {
>       struct ovn_datapath **ls;
>       size_t n_lr;
>       struct ovn_datapath **lr;
> +
> +    struct sset ods;
>   };
>   
>   struct ovn_lb_group *ovn_lb_group_create(
> diff --git a/northd/northd.c b/northd/northd.c
> index 346c0c7c8..830a6fa48 100644
> --- a/northd/northd.c
> +++ b/northd/northd.c
> @@ -4068,7 +4068,8 @@ build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs,
>   }
>   
>   static void
> -build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
> +build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs,
> +                               struct hmap *lb_groups)
>   {
>       if (!install_ls_lb_from_router) {
>           return;
> @@ -4082,16 +4083,12 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>   
>           struct ovn_port *op;
>           LIST_FOR_EACH (op, dp_node, &od->port_list) {
> -            if (!lsp_is_router(op->nbsp)) {
> -                continue;
> -            }
> -            if (!op->peer) {
> +            if (!lsp_is_router(op->nbsp) || !op->peer) {
>                   continue;
>               }
>   
>               struct ovn_datapath *peer_od = op->peer->od;
>               for (size_t i = 0; i < peer_od->nbr->n_load_balancer; i++) {
> -                bool installed = false;
>                   const struct uuid *lb_uuid =
>                       &peer_od->nbr->load_balancer[i]->header_.uuid;
>                   struct ovn_northd_lb *lb = ovn_northd_lb_find(lbs, lb_uuid);
> @@ -4099,19 +4096,31 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>                       continue;
>                   }
>   
> -                for (size_t j = 0; j < lb->n_nb_ls; j++) {
> -                   if (lb->nb_ls[j] == od) {
> -                       installed = true;
> -                       break;
> -                   }
> -                }
> -                if (!installed) {
> +                if (sset_add(&lb->ods, od->nbs->name)) {
>                       ovn_northd_lb_add_ls(lb, 1, &od);
>                   }
> -                if (lb->nlb) {
> -                    od->has_lb_vip |= lb_has_vip(lb->nlb);
> +            }
> +
> +            for (size_t i = 0; i < peer_od->nbr->n_load_balancer_group; i++) {
> +                const struct nbrec_load_balancer_group *nbrec_lb_group;
> +                struct ovn_lb_group *lb_group;
> +
> +                nbrec_lb_group = peer_od->nbr->load_balancer_group[i];
> +                lb_group = ovn_lb_group_find(lb_groups,
> +                                             &nbrec_lb_group->header_.uuid);
> +                if (!lb_group) {
> +                    continue;
> +                }
> +
> +                if (sset_add(&lb_group->ods, od->nbs->name)) {
> +                    ovn_lb_group_add_ls(lb_group, od);
> +                    for (size_t j = 0; j < lb_group->n_lbs; j++) {
> +                        ovn_northd_lb_add_ls(lb_group->lbs[j], 1, &od);
> +                    }
>                   }
>               }
> +
> +            od->has_lb_vip = ls_has_lb_vip(od);
>           }
>       }
>   }
> @@ -4129,7 +4138,7 @@ build_lb_port_related_data(struct hmap *datapaths, struct hmap *ports,
>       build_lrouter_lbs_check(datapaths);
>       build_lrouter_lbs_reachable_ips(datapaths, lbs, lb_groups);
>       build_lb_svcs(input_data, ovnsb_txn, ports, lbs);
> -    build_lswitch_lbs_from_lrouter(datapaths, lbs);
> +    build_lswitch_lbs_from_lrouter(datapaths, lbs, lb_groups);
>   }
>   
>   
> diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
> index d5136ac6d..e9bb22d9e 100644
> --- a/tests/ovn-northd.at
> +++ b/tests/ovn-northd.at
> @@ -7730,6 +7730,12 @@ ovn-nbctl lsp-set-options S1-R1 router-port=R1-S1
>   ovn-nbctl lb-add lb0 172.16.0.10:80 10.0.0.2:80
>   ovn-nbctl lr-lb-add R1 lb0
>   
> +ovn-nbctl lb-add lb1 172.16.0.11:8080 10.0.0.2:8080
> +lb1_uuid=$(fetch_column nb:load_balancer _uuid name=lb1)
> +lbg=$(ovn-nbctl create load_balancer_group name=lbg -- \
> +    add load_balancer_group lbg load_balancer $lb1_uuid)
> +ovn-nbctl add logical_router R1 load_balancer_group $lbg
> +
>   ovn-sbctl dump-flows S0 > S0flows
>   ovn-sbctl dump-flows S1 > S1flows
>   
> @@ -7754,10 +7760,12 @@ AT_CAPTURE_FILE([S1flows])
>   AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
>     table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
>     table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
>   ])
>   AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
>     table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
>     table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
>   ])
>   
>   ovn-sbctl get datapath S0 _uuid > dp_uuids
Dumitru Ceara Oct. 19, 2022, 3:46 p.m. UTC | #2
On 9/14/22 14:42, Lorenzo Bianconi wrote:
> Similar to single load balancers, add the capability to automatically
> deploy a load-balancer group on each logical-switch connected to a
> logical router where the load-balancer group has been installed by the
> CMS.
> 
> Reported-at: https://bugzilla.redhat.com/show_bug.cgi?id=2125310
> Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
> ---

Hi Lorenzo,

>  lib/lb.c            |  5 +++++
>  lib/lb.h            |  3 +++
>  northd/northd.c     | 41 +++++++++++++++++++++++++----------------
>  tests/ovn-northd.at |  8 ++++++++
>  4 files changed, 41 insertions(+), 16 deletions(-)
> 
> diff --git a/lib/lb.c b/lib/lb.c
> index fa1a66d82..895828a09 100644
> --- a/lib/lb.c
> +++ b/lib/lb.c
> @@ -182,6 +182,7 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
>                                           : LB_NEIGH_RESPOND_ALL;
>      sset_init(&lb->ips_v4);
>      sset_init(&lb->ips_v6);
> +    sset_init(&lb->ods);
>      struct smap_node *node;
>      size_t n_vips = 0;
>  
> @@ -280,6 +281,7 @@ ovn_northd_lb_destroy(struct ovn_northd_lb *lb)
>      free(lb->vips_nb);
>      sset_destroy(&lb->ips_v4);
>      sset_destroy(&lb->ips_v6);
> +    sset_destroy(&lb->ods);
>      free(lb->selection_fields);
>      free(lb->nb_lr);
>      free(lb->nb_ls);
> @@ -304,6 +306,8 @@ ovn_lb_group_create(const struct nbrec_load_balancer_group *nbrec_lb_group,
>      lb_group->ls = xmalloc(max_datapaths * sizeof *lb_group->ls);
>      lb_group->lr = xmalloc(max_datapaths * sizeof *lb_group->lr);
>  
> +    sset_init(&lb_group->ods);
> +
>      for (size_t i = 0; i < nbrec_lb_group->n_load_balancer; i++) {
>          const struct uuid *lb_uuid =
>              &nbrec_lb_group->load_balancer[i]->header_.uuid;
> @@ -320,6 +324,7 @@ ovn_lb_group_destroy(struct ovn_lb_group *lb_group)
>          return;
>      }
>  
> +    sset_destroy(&lb_group->ods);
>      free(lb_group->lbs);
>      free(lb_group->ls);
>      free(lb_group->lr);
> diff --git a/lib/lb.h b/lib/lb.h
> index e0b153fb3..58530e222 100644
> --- a/lib/lb.h
> +++ b/lib/lb.h
> @@ -55,6 +55,7 @@ struct ovn_northd_lb {
>  
>      struct sset ips_v4;
>      struct sset ips_v6;
> +    struct sset ods;
>  
>      size_t n_nb_ls;
>      size_t n_allocated_nb_ls;
> @@ -118,6 +119,8 @@ struct ovn_lb_group {
>      struct ovn_datapath **ls;
>      size_t n_lr;
>      struct ovn_datapath **lr;
> +
> +    struct sset ods;
>  };
>  
>  struct ovn_lb_group *ovn_lb_group_create(
> diff --git a/northd/northd.c b/northd/northd.c
> index 346c0c7c8..830a6fa48 100644
> --- a/northd/northd.c
> +++ b/northd/northd.c
> @@ -4068,7 +4068,8 @@ build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs,
>  }
>  
>  static void
> -build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
> +build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs,
> +                               struct hmap *lb_groups)
>  {
>      if (!install_ls_lb_from_router) {
>          return;
> @@ -4082,16 +4083,12 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>  
>          struct ovn_port *op;
>          LIST_FOR_EACH (op, dp_node, &od->port_list) {
> -            if (!lsp_is_router(op->nbsp)) {
> -                continue;
> -            }
> -            if (!op->peer) {
> +            if (!lsp_is_router(op->nbsp) || !op->peer) {

It's not introduced by this patch but we're touching this so we might as
well change this part too:  there's no need to iterate through all ports
of the switch.  We already have ovn_datapath->router_ports.

While we're at it, I'm confused about why we don't do all this the other
way around:

First, for each router build the array of peer switches (we have all the
info in ovn_datapath_add_router_port()).

Then for each LB, for each router the LB is applied on, call
ovn_northd_lb_add_ls(lb, rtr->n_peer_switches, rtr->peer_switches).

Then for every LB group, for each router the LB group is applied on:

- call ovn_lb_group_add_ls(lb_group, rtr->n_peer_switches,
rtr->peer_switches)
- for every LB in the group call ovn_northd_lb_add_ls(lb,
rtr->n_peer_switches, rtr->peer_switches)

I think this should save a bunch of ovn_northd_lb_find() and
ovn_lb_group_find() calls.  It would also use ovn_northd_lb_add_ls()
for more switches at a time, so it will avoid some of the reallocations.

We would have to change ovn_lb_group_add_ls() to work with arrays of
datapaths but that shouldn't be a big problem.  What do you think?

>                  continue;
>              }
>  
>              struct ovn_datapath *peer_od = op->peer->od;
>              for (size_t i = 0; i < peer_od->nbr->n_load_balancer; i++) {
> -                bool installed = false;
>                  const struct uuid *lb_uuid =
>                      &peer_od->nbr->load_balancer[i]->header_.uuid;
>                  struct ovn_northd_lb *lb = ovn_northd_lb_find(lbs, lb_uuid);
> @@ -4099,19 +4096,31 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>                      continue;
>                  }
>  
> -                for (size_t j = 0; j < lb->n_nb_ls; j++) {
> -                   if (lb->nb_ls[j] == od) {
> -                       installed = true;
> -                       break;
> -                   }
> -                }
> -                if (!installed) {
> +                if (sset_add(&lb->ods, od->nbs->name)) {

Also, I don't think we really need the sset() or to avoid duplicates in
general.  Duplicates don't really hurt and, in a real deployment, we
would expect the CMS to only configure the load balancer in one place
(on the LR or on the LB group associated to the LR).

Regards,
Dumitru

>                      ovn_northd_lb_add_ls(lb, 1, &od);
>                  }
> -                if (lb->nlb) {
> -                    od->has_lb_vip |= lb_has_vip(lb->nlb);
> +            }
> +
> +            for (size_t i = 0; i < peer_od->nbr->n_load_balancer_group; i++) {
> +                const struct nbrec_load_balancer_group *nbrec_lb_group;
> +                struct ovn_lb_group *lb_group;
> +
> +                nbrec_lb_group = peer_od->nbr->load_balancer_group[i];
> +                lb_group = ovn_lb_group_find(lb_groups,
> +                                             &nbrec_lb_group->header_.uuid);
> +                if (!lb_group) {
> +                    continue;
> +                }
> +
> +                if (sset_add(&lb_group->ods, od->nbs->name)) {
> +                    ovn_lb_group_add_ls(lb_group, od);
> +                    for (size_t j = 0; j < lb_group->n_lbs; j++) {
> +                        ovn_northd_lb_add_ls(lb_group->lbs[j], 1, &od);
> +                    }
>                  }
>              }
> +
> +            od->has_lb_vip = ls_has_lb_vip(od);
>          }
>      }
>  }
> @@ -4129,7 +4138,7 @@ build_lb_port_related_data(struct hmap *datapaths, struct hmap *ports,
>      build_lrouter_lbs_check(datapaths);
>      build_lrouter_lbs_reachable_ips(datapaths, lbs, lb_groups);
>      build_lb_svcs(input_data, ovnsb_txn, ports, lbs);
> -    build_lswitch_lbs_from_lrouter(datapaths, lbs);
> +    build_lswitch_lbs_from_lrouter(datapaths, lbs, lb_groups);
>  }
>  
>  
> diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
> index d5136ac6d..e9bb22d9e 100644
> --- a/tests/ovn-northd.at
> +++ b/tests/ovn-northd.at
> @@ -7730,6 +7730,12 @@ ovn-nbctl lsp-set-options S1-R1 router-port=R1-S1
>  ovn-nbctl lb-add lb0 172.16.0.10:80 10.0.0.2:80
>  ovn-nbctl lr-lb-add R1 lb0
>  
> +ovn-nbctl lb-add lb1 172.16.0.11:8080 10.0.0.2:8080
> +lb1_uuid=$(fetch_column nb:load_balancer _uuid name=lb1)
> +lbg=$(ovn-nbctl create load_balancer_group name=lbg -- \
> +    add load_balancer_group lbg load_balancer $lb1_uuid)
> +ovn-nbctl add logical_router R1 load_balancer_group $lbg
> +
>  ovn-sbctl dump-flows S0 > S0flows
>  ovn-sbctl dump-flows S1 > S1flows
>  
> @@ -7754,10 +7760,12 @@ AT_CAPTURE_FILE([S1flows])
>  AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
>    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
>    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
>  ])
>  AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
>    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
>    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
>  ])
>  
>  ovn-sbctl get datapath S0 _uuid > dp_uuids
Lorenzo Bianconi Oct. 19, 2022, 9 p.m. UTC | #3
> On 9/14/22 14:42, Lorenzo Bianconi wrote:
> > Similar to single load balancers, add the capability to automatically
> > deploy a load-balancer group on each logical-switch connected to a
> > logical router where the load-balancer group has been installed by the
> > CMS.
> > 
> > Reported-at: https://bugzilla.redhat.com/show_bug.cgi?id=2125310
> > Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
> > ---
> 
> Hi Lorenzo,

Hi Dumitru,

thx for the review.

> 
> >  lib/lb.c            |  5 +++++
> >  lib/lb.h            |  3 +++
> >  northd/northd.c     | 41 +++++++++++++++++++++++++----------------
> >  tests/ovn-northd.at |  8 ++++++++
> >  4 files changed, 41 insertions(+), 16 deletions(-)
> > 
> > diff --git a/lib/lb.c b/lib/lb.c
> > index fa1a66d82..895828a09 100644
> > --- a/lib/lb.c
> > +++ b/lib/lb.c
> > @@ -182,6 +182,7 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
> >                                           : LB_NEIGH_RESPOND_ALL;
> >      sset_init(&lb->ips_v4);
> >      sset_init(&lb->ips_v6);
> > +    sset_init(&lb->ods);
> >      struct smap_node *node;
> >      size_t n_vips = 0;
> >  
> > @@ -280,6 +281,7 @@ ovn_northd_lb_destroy(struct ovn_northd_lb *lb)
> >      free(lb->vips_nb);
> >      sset_destroy(&lb->ips_v4);
> >      sset_destroy(&lb->ips_v6);
> > +    sset_destroy(&lb->ods);
> >      free(lb->selection_fields);
> >      free(lb->nb_lr);
> >      free(lb->nb_ls);
> > @@ -304,6 +306,8 @@ ovn_lb_group_create(const struct nbrec_load_balancer_group *nbrec_lb_group,
> >      lb_group->ls = xmalloc(max_datapaths * sizeof *lb_group->ls);
> >      lb_group->lr = xmalloc(max_datapaths * sizeof *lb_group->lr);
> >  
> > +    sset_init(&lb_group->ods);
> > +
> >      for (size_t i = 0; i < nbrec_lb_group->n_load_balancer; i++) {
> >          const struct uuid *lb_uuid =
> >              &nbrec_lb_group->load_balancer[i]->header_.uuid;
> > @@ -320,6 +324,7 @@ ovn_lb_group_destroy(struct ovn_lb_group *lb_group)
> >          return;
> >      }
> >  
> > +    sset_destroy(&lb_group->ods);
> >      free(lb_group->lbs);
> >      free(lb_group->ls);
> >      free(lb_group->lr);
> > diff --git a/lib/lb.h b/lib/lb.h
> > index e0b153fb3..58530e222 100644
> > --- a/lib/lb.h
> > +++ b/lib/lb.h
> > @@ -55,6 +55,7 @@ struct ovn_northd_lb {
> >  
> >      struct sset ips_v4;
> >      struct sset ips_v6;
> > +    struct sset ods;
> >  
> >      size_t n_nb_ls;
> >      size_t n_allocated_nb_ls;
> > @@ -118,6 +119,8 @@ struct ovn_lb_group {
> >      struct ovn_datapath **ls;
> >      size_t n_lr;
> >      struct ovn_datapath **lr;
> > +
> > +    struct sset ods;
> >  };
> >  
> >  struct ovn_lb_group *ovn_lb_group_create(
> > diff --git a/northd/northd.c b/northd/northd.c
> > index 346c0c7c8..830a6fa48 100644
> > --- a/northd/northd.c
> > +++ b/northd/northd.c
> > @@ -4068,7 +4068,8 @@ build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs,
> >  }
> >  
> >  static void
> > -build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
> > +build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs,
> > +                               struct hmap *lb_groups)
> >  {
> >      if (!install_ls_lb_from_router) {
> >          return;
> > @@ -4082,16 +4083,12 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
> >  
> >          struct ovn_port *op;
> >          LIST_FOR_EACH (op, dp_node, &od->port_list) {
> > -            if (!lsp_is_router(op->nbsp)) {
> > -                continue;
> > -            }
> > -            if (!op->peer) {
> > +            if (!lsp_is_router(op->nbsp) || !op->peer) {
> 
> It's not introduced by this patch but we're touching this so we might as
> well change this part too:  there's no need to iterate through all ports
> of the switch.  We already have ovn_datapath->router_ports.
> 
> While we're at it, I'm confused about why we don't do all this the other
> way around:
> 
> First, for each router build the array of peer switches (we have all the
> info in ovn_datapath_add_router_port()).
> 
> Then for each LB, for each router the LB is applied on, call
> ovn_northd_lb_add_ls(lb, rtr->n_peer_switches, rtr->peer_switches).
> 
> Then for every LB group, for each router the LB group is applied on:
> 
> - call ovn_lb_group_add_ls(lb_group, rtr->n_peer_switches,
> rtr->peer_switches)
> - for every LB in the group call ovn_northd_lb_add_ls(lb,
> rtr->n_peer_switches, rtr->peer_switches)
> 
> I think this should save a bunch of ovn_northd_lb_find() and
> ovn_lb_group_find() calls.  It would also use ovn_northd_lb_add_ls()
> for more switches at a time, so it will avoid some of the reallocations.
> 
> We would have to change ovn_lb_group_add_ls() to work with arrays of
> datapaths but that shouldn't be a big problem.  What do you think?

it seems doable to me, I will work on it.

> 
> >                  continue;
> >              }
> >  
> >              struct ovn_datapath *peer_od = op->peer->od;
> >              for (size_t i = 0; i < peer_od->nbr->n_load_balancer; i++) {
> > -                bool installed = false;
> >                  const struct uuid *lb_uuid =
> >                      &peer_od->nbr->load_balancer[i]->header_.uuid;
> >                  struct ovn_northd_lb *lb = ovn_northd_lb_find(lbs, lb_uuid);
> > @@ -4099,19 +4096,31 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
> >                      continue;
> >                  }
> >  
> > -                for (size_t j = 0; j < lb->n_nb_ls; j++) {
> > -                   if (lb->nb_ls[j] == od) {
> > -                       installed = true;
> > -                       break;
> > -                   }
> > -                }
> > -                if (!installed) {
> > +                if (sset_add(&lb->ods, od->nbs->name)) {
> 
> Also, I don't think we really need the sset() or to avoid duplicates in
> general.  Duplicates don't really hurt and, in a real deployment, we
> would expect the CMS to only configure the load balancer in one place
> (on the LR or on the LB group associated to the LR).

I think we need it since (at least theoretically) we can have the same lb
applied to two logical routers connected to the same logical switch. Am I
missing something?

Regards,
Lorenzo

> 
> Regards,
> Dumitru
> 
> >                      ovn_northd_lb_add_ls(lb, 1, &od);
> >                  }
> > -                if (lb->nlb) {
> > -                    od->has_lb_vip |= lb_has_vip(lb->nlb);
> > +            }
> > +
> > +            for (size_t i = 0; i < peer_od->nbr->n_load_balancer_group; i++) {
> > +                const struct nbrec_load_balancer_group *nbrec_lb_group;
> > +                struct ovn_lb_group *lb_group;
> > +
> > +                nbrec_lb_group = peer_od->nbr->load_balancer_group[i];
> > +                lb_group = ovn_lb_group_find(lb_groups,
> > +                                             &nbrec_lb_group->header_.uuid);
> > +                if (!lb_group) {
> > +                    continue;
> > +                }
> > +
> > +                if (sset_add(&lb_group->ods, od->nbs->name)) {
> > +                    ovn_lb_group_add_ls(lb_group, od);
> > +                    for (size_t j = 0; j < lb_group->n_lbs; j++) {
> > +                        ovn_northd_lb_add_ls(lb_group->lbs[j], 1, &od);
> > +                    }
> >                  }
> >              }
> > +
> > +            od->has_lb_vip = ls_has_lb_vip(od);
> >          }
> >      }
> >  }
> > @@ -4129,7 +4138,7 @@ build_lb_port_related_data(struct hmap *datapaths, struct hmap *ports,
> >      build_lrouter_lbs_check(datapaths);
> >      build_lrouter_lbs_reachable_ips(datapaths, lbs, lb_groups);
> >      build_lb_svcs(input_data, ovnsb_txn, ports, lbs);
> > -    build_lswitch_lbs_from_lrouter(datapaths, lbs);
> > +    build_lswitch_lbs_from_lrouter(datapaths, lbs, lb_groups);
> >  }
> >  
> >  
> > diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
> > index d5136ac6d..e9bb22d9e 100644
> > --- a/tests/ovn-northd.at
> > +++ b/tests/ovn-northd.at
> > @@ -7730,6 +7730,12 @@ ovn-nbctl lsp-set-options S1-R1 router-port=R1-S1
> >  ovn-nbctl lb-add lb0 172.16.0.10:80 10.0.0.2:80
> >  ovn-nbctl lr-lb-add R1 lb0
> >  
> > +ovn-nbctl lb-add lb1 172.16.0.11:8080 10.0.0.2:8080
> > +lb1_uuid=$(fetch_column nb:load_balancer _uuid name=lb1)
> > +lbg=$(ovn-nbctl create load_balancer_group name=lbg -- \
> > +    add load_balancer_group lbg load_balancer $lb1_uuid)
> > +ovn-nbctl add logical_router R1 load_balancer_group $lbg
> > +
> >  ovn-sbctl dump-flows S0 > S0flows
> >  ovn-sbctl dump-flows S1 > S1flows
> >  
> > @@ -7754,10 +7760,12 @@ AT_CAPTURE_FILE([S1flows])
> >  AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
> >    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
> >    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
> > +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
> >  ])
> >  AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
> >    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
> >    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
> > +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
> >  ])
> >  
> >  ovn-sbctl get datapath S0 _uuid > dp_uuids
>
Dumitru Ceara Oct. 19, 2022, 9:10 p.m. UTC | #4
On 10/19/22 23:00, Lorenzo Bianconi wrote:
>> On 9/14/22 14:42, Lorenzo Bianconi wrote:
>>> Similar to single load balancers, add the capability to automatically
>>> deploy a load-balancer group on each logical-switch connected to a
>>> logical router where the load-balancer group has been installed by the
>>> CMS.
>>>
>>> Reported-at: https://bugzilla.redhat.com/show_bug.cgi?id=2125310
>>> Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
>>> ---
>>
>> Hi Lorenzo,
> 
> Hi Dumitru,
> 
> thx for the review.
> 
>>
>>>  lib/lb.c            |  5 +++++
>>>  lib/lb.h            |  3 +++
>>>  northd/northd.c     | 41 +++++++++++++++++++++++++----------------
>>>  tests/ovn-northd.at |  8 ++++++++
>>>  4 files changed, 41 insertions(+), 16 deletions(-)
>>>
>>> diff --git a/lib/lb.c b/lib/lb.c
>>> index fa1a66d82..895828a09 100644
>>> --- a/lib/lb.c
>>> +++ b/lib/lb.c
>>> @@ -182,6 +182,7 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
>>>                                           : LB_NEIGH_RESPOND_ALL;
>>>      sset_init(&lb->ips_v4);
>>>      sset_init(&lb->ips_v6);
>>> +    sset_init(&lb->ods);
>>>      struct smap_node *node;
>>>      size_t n_vips = 0;
>>>  
>>> @@ -280,6 +281,7 @@ ovn_northd_lb_destroy(struct ovn_northd_lb *lb)
>>>      free(lb->vips_nb);
>>>      sset_destroy(&lb->ips_v4);
>>>      sset_destroy(&lb->ips_v6);
>>> +    sset_destroy(&lb->ods);
>>>      free(lb->selection_fields);
>>>      free(lb->nb_lr);
>>>      free(lb->nb_ls);
>>> @@ -304,6 +306,8 @@ ovn_lb_group_create(const struct nbrec_load_balancer_group *nbrec_lb_group,
>>>      lb_group->ls = xmalloc(max_datapaths * sizeof *lb_group->ls);
>>>      lb_group->lr = xmalloc(max_datapaths * sizeof *lb_group->lr);
>>>  
>>> +    sset_init(&lb_group->ods);
>>> +
>>>      for (size_t i = 0; i < nbrec_lb_group->n_load_balancer; i++) {
>>>          const struct uuid *lb_uuid =
>>>              &nbrec_lb_group->load_balancer[i]->header_.uuid;
>>> @@ -320,6 +324,7 @@ ovn_lb_group_destroy(struct ovn_lb_group *lb_group)
>>>          return;
>>>      }
>>>  
>>> +    sset_destroy(&lb_group->ods);
>>>      free(lb_group->lbs);
>>>      free(lb_group->ls);
>>>      free(lb_group->lr);
>>> diff --git a/lib/lb.h b/lib/lb.h
>>> index e0b153fb3..58530e222 100644
>>> --- a/lib/lb.h
>>> +++ b/lib/lb.h
>>> @@ -55,6 +55,7 @@ struct ovn_northd_lb {
>>>  
>>>      struct sset ips_v4;
>>>      struct sset ips_v6;
>>> +    struct sset ods;
>>>  
>>>      size_t n_nb_ls;
>>>      size_t n_allocated_nb_ls;
>>> @@ -118,6 +119,8 @@ struct ovn_lb_group {
>>>      struct ovn_datapath **ls;
>>>      size_t n_lr;
>>>      struct ovn_datapath **lr;
>>> +
>>> +    struct sset ods;
>>>  };
>>>  
>>>  struct ovn_lb_group *ovn_lb_group_create(
>>> diff --git a/northd/northd.c b/northd/northd.c
>>> index 346c0c7c8..830a6fa48 100644
>>> --- a/northd/northd.c
>>> +++ b/northd/northd.c
>>> @@ -4068,7 +4068,8 @@ build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs,
>>>  }
>>>  
>>>  static void
>>> -build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>>> +build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs,
>>> +                               struct hmap *lb_groups)
>>>  {
>>>      if (!install_ls_lb_from_router) {
>>>          return;
>>> @@ -4082,16 +4083,12 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>>>  
>>>          struct ovn_port *op;
>>>          LIST_FOR_EACH (op, dp_node, &od->port_list) {
>>> -            if (!lsp_is_router(op->nbsp)) {
>>> -                continue;
>>> -            }
>>> -            if (!op->peer) {
>>> +            if (!lsp_is_router(op->nbsp) || !op->peer) {
>>
>> It's not introduced by this patch but we're touching this so we might as
>> well change this part too:  there's no need to iterate through all ports
>> of the switch.  We already have ovn_datapath->router_ports.
>>
>> While we're at it, I'm confused about why we don't do all this the other
>> way around:
>>
>> First, for each router build the array of peer switches (we have all the
>> info in ovn_datapath_add_router_port()).
>>
>> Then for each LB, for each router the LB is applied on, call
>> ovn_northd_lb_add_ls(lb, rtr->n_peer_switches, rtr->peer_switches).
>>
>> Then for every LB group, for each router the LB group is applied on:
>>
>> - call ovn_lb_group_add_ls(lb_group, rtr->n_peer_switches,
>> rtr->peer_switches)
>> - for every LB in the group call ovn_northd_lb_add_ls(lb,
>> rtr->n_peer_switches, rtr->peer_switches)
>>
>> I think this should save a bunch of ovn_northd_lb_find() and
>> ovn_lb_group_find() calls.  It would also use ovn_northd_lb_add_ls()
>> for more switches at a time, so it will avoid some of the reallocations.
>>
>> We would have to change ovn_lb_group_add_ls() to work with arrays of
>> datapaths but that shouldn't be a big problem.  What do you think?
> 
> it seems doable to me, I will work on it.
> 
>>
>>>                  continue;
>>>              }
>>>  
>>>              struct ovn_datapath *peer_od = op->peer->od;
>>>              for (size_t i = 0; i < peer_od->nbr->n_load_balancer; i++) {
>>> -                bool installed = false;
>>>                  const struct uuid *lb_uuid =
>>>                      &peer_od->nbr->load_balancer[i]->header_.uuid;
>>>                  struct ovn_northd_lb *lb = ovn_northd_lb_find(lbs, lb_uuid);
>>> @@ -4099,19 +4096,31 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>>>                      continue;
>>>                  }
>>>  
>>> -                for (size_t j = 0; j < lb->n_nb_ls; j++) {
>>> -                   if (lb->nb_ls[j] == od) {
>>> -                       installed = true;
>>> -                       break;
>>> -                   }
>>> -                }
>>> -                if (!installed) {
>>> +                if (sset_add(&lb->ods, od->nbs->name)) {
>>
>> Also, I don't think we really need the sset() or to avoid duplicates in
>> general.  Duplicates don't really hurt and, in a real deployment, we
>> would expect the CMS to only configure the load balancer in one place
>> (on the LR or on the LB group associated to the LR).
> 
> I think we need it since (at least theoretically) we can have the same lb
> applied to two logical routers connected to the same logical switch. Am I
> missing something?
> 

You're right, it's possible although I don't think it's too common.  In
the worst case we will just duplicate some lflows.  I think that's fine.

What do you think?

Thanks,
Dumitru

> Regards,
> Lorenzo
> 
>>
>> Regards,
>> Dumitru
>>
>>>                      ovn_northd_lb_add_ls(lb, 1, &od);
>>>                  }
>>> -                if (lb->nlb) {
>>> -                    od->has_lb_vip |= lb_has_vip(lb->nlb);
>>> +            }
>>> +
>>> +            for (size_t i = 0; i < peer_od->nbr->n_load_balancer_group; i++) {
>>> +                const struct nbrec_load_balancer_group *nbrec_lb_group;
>>> +                struct ovn_lb_group *lb_group;
>>> +
>>> +                nbrec_lb_group = peer_od->nbr->load_balancer_group[i];
>>> +                lb_group = ovn_lb_group_find(lb_groups,
>>> +                                             &nbrec_lb_group->header_.uuid);
>>> +                if (!lb_group) {
>>> +                    continue;
>>> +                }
>>> +
>>> +                if (sset_add(&lb_group->ods, od->nbs->name)) {
>>> +                    ovn_lb_group_add_ls(lb_group, od);
>>> +                    for (size_t j = 0; j < lb_group->n_lbs; j++) {
>>> +                        ovn_northd_lb_add_ls(lb_group->lbs[j], 1, &od);
>>> +                    }
>>>                  }
>>>              }
>>> +
>>> +            od->has_lb_vip = ls_has_lb_vip(od);
>>>          }
>>>      }
>>>  }
>>> @@ -4129,7 +4138,7 @@ build_lb_port_related_data(struct hmap *datapaths, struct hmap *ports,
>>>      build_lrouter_lbs_check(datapaths);
>>>      build_lrouter_lbs_reachable_ips(datapaths, lbs, lb_groups);
>>>      build_lb_svcs(input_data, ovnsb_txn, ports, lbs);
>>> -    build_lswitch_lbs_from_lrouter(datapaths, lbs);
>>> +    build_lswitch_lbs_from_lrouter(datapaths, lbs, lb_groups);
>>>  }
>>>  
>>>  
>>> diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
>>> index d5136ac6d..e9bb22d9e 100644
>>> --- a/tests/ovn-northd.at
>>> +++ b/tests/ovn-northd.at
>>> @@ -7730,6 +7730,12 @@ ovn-nbctl lsp-set-options S1-R1 router-port=R1-S1
>>>  ovn-nbctl lb-add lb0 172.16.0.10:80 10.0.0.2:80
>>>  ovn-nbctl lr-lb-add R1 lb0
>>>  
>>> +ovn-nbctl lb-add lb1 172.16.0.11:8080 10.0.0.2:8080
>>> +lb1_uuid=$(fetch_column nb:load_balancer _uuid name=lb1)
>>> +lbg=$(ovn-nbctl create load_balancer_group name=lbg -- \
>>> +    add load_balancer_group lbg load_balancer $lb1_uuid)
>>> +ovn-nbctl add logical_router R1 load_balancer_group $lbg
>>> +
>>>  ovn-sbctl dump-flows S0 > S0flows
>>>  ovn-sbctl dump-flows S1 > S1flows
>>>  
>>> @@ -7754,10 +7760,12 @@ AT_CAPTURE_FILE([S1flows])
>>>  AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
>>>    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
>>>    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
>>> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
>>>  ])
>>>  AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
>>>    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
>>>    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
>>> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
>>>  ])
>>>  
>>>  ovn-sbctl get datapath S0 _uuid > dp_uuids
>>
Ilya Maximets Oct. 19, 2022, 9:47 p.m. UTC | #5
On 10/19/22 23:10, Dumitru Ceara wrote:
> On 10/19/22 23:00, Lorenzo Bianconi wrote:
>>> On 9/14/22 14:42, Lorenzo Bianconi wrote:
>>>> Similar to single load balancers, add the capability to automatically
>>>> deploy a load-balancer group on each logical-switch connected to a
>>>> logical router where the load-balancer group has been installed by the
>>>> CMS.
>>>>
>>>> Reported-at: https://bugzilla.redhat.com/show_bug.cgi?id=2125310
>>>> Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
>>>> ---
>>>
>>> Hi Lorenzo,
>>
>> Hi Dumitru,
>>
>> thx for the review.
>>
>>>
>>>>  lib/lb.c            |  5 +++++
>>>>  lib/lb.h            |  3 +++
>>>>  northd/northd.c     | 41 +++++++++++++++++++++++++----------------
>>>>  tests/ovn-northd.at |  8 ++++++++
>>>>  4 files changed, 41 insertions(+), 16 deletions(-)
>>>>
>>>> diff --git a/lib/lb.c b/lib/lb.c
>>>> index fa1a66d82..895828a09 100644
>>>> --- a/lib/lb.c
>>>> +++ b/lib/lb.c
>>>> @@ -182,6 +182,7 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
>>>>                                           : LB_NEIGH_RESPOND_ALL;
>>>>      sset_init(&lb->ips_v4);
>>>>      sset_init(&lb->ips_v6);
>>>> +    sset_init(&lb->ods);
>>>>      struct smap_node *node;
>>>>      size_t n_vips = 0;
>>>>  
>>>> @@ -280,6 +281,7 @@ ovn_northd_lb_destroy(struct ovn_northd_lb *lb)
>>>>      free(lb->vips_nb);
>>>>      sset_destroy(&lb->ips_v4);
>>>>      sset_destroy(&lb->ips_v6);
>>>> +    sset_destroy(&lb->ods);
>>>>      free(lb->selection_fields);
>>>>      free(lb->nb_lr);
>>>>      free(lb->nb_ls);
>>>> @@ -304,6 +306,8 @@ ovn_lb_group_create(const struct nbrec_load_balancer_group *nbrec_lb_group,
>>>>      lb_group->ls = xmalloc(max_datapaths * sizeof *lb_group->ls);
>>>>      lb_group->lr = xmalloc(max_datapaths * sizeof *lb_group->lr);
>>>>  
>>>> +    sset_init(&lb_group->ods);
>>>> +
>>>>      for (size_t i = 0; i < nbrec_lb_group->n_load_balancer; i++) {
>>>>          const struct uuid *lb_uuid =
>>>>              &nbrec_lb_group->load_balancer[i]->header_.uuid;
>>>> @@ -320,6 +324,7 @@ ovn_lb_group_destroy(struct ovn_lb_group *lb_group)
>>>>          return;
>>>>      }
>>>>  
>>>> +    sset_destroy(&lb_group->ods);
>>>>      free(lb_group->lbs);
>>>>      free(lb_group->ls);
>>>>      free(lb_group->lr);
>>>> diff --git a/lib/lb.h b/lib/lb.h
>>>> index e0b153fb3..58530e222 100644
>>>> --- a/lib/lb.h
>>>> +++ b/lib/lb.h
>>>> @@ -55,6 +55,7 @@ struct ovn_northd_lb {
>>>>  
>>>>      struct sset ips_v4;
>>>>      struct sset ips_v6;
>>>> +    struct sset ods;
>>>>  
>>>>      size_t n_nb_ls;
>>>>      size_t n_allocated_nb_ls;
>>>> @@ -118,6 +119,8 @@ struct ovn_lb_group {
>>>>      struct ovn_datapath **ls;
>>>>      size_t n_lr;
>>>>      struct ovn_datapath **lr;
>>>> +
>>>> +    struct sset ods;
>>>>  };
>>>>  
>>>>  struct ovn_lb_group *ovn_lb_group_create(
>>>> diff --git a/northd/northd.c b/northd/northd.c
>>>> index 346c0c7c8..830a6fa48 100644
>>>> --- a/northd/northd.c
>>>> +++ b/northd/northd.c
>>>> @@ -4068,7 +4068,8 @@ build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs,
>>>>  }
>>>>  
>>>>  static void
>>>> -build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>>>> +build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs,
>>>> +                               struct hmap *lb_groups)
>>>>  {
>>>>      if (!install_ls_lb_from_router) {
>>>>          return;
>>>> @@ -4082,16 +4083,12 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>>>>  
>>>>          struct ovn_port *op;
>>>>          LIST_FOR_EACH (op, dp_node, &od->port_list) {
>>>> -            if (!lsp_is_router(op->nbsp)) {
>>>> -                continue;
>>>> -            }
>>>> -            if (!op->peer) {
>>>> +            if (!lsp_is_router(op->nbsp) || !op->peer) {
>>>
>>> It's not introduced by this patch but we're touching this so we might as
>>> well change this part too:  there's no need to iterate through all ports
>>> of the switch.  We already have ovn_datapath->router_ports.
>>>
>>> While we're at it, I'm confused about why we don't do all this the other
>>> way around:
>>>
>>> First, for each router build the array of peer switches (we have all the
>>> info in ovn_datapath_add_router_port()).
>>>
>>> Then for each LB, for each router the LB is applied on, call
>>> ovn_northd_lb_add_ls(lb, rtr->n_peer_switches, rtr->peer_switches).
>>>
>>> Then for every LB group, for each router the LB group is applied on:
>>>
>>> - call ovn_lb_group_add_ls(lb_group, rtr->n_peer_switches,
>>> rtr->peer_switches)
>>> - for every LB in the group call ovn_northd_lb_add_ls(lb,
>>> rtr->n_peer_switches, rtr->peer_switches)
>>>
>>> I think this should save a bunch of ovn_northd_lb_find() and
>>> ovn_lb_group_find() calls.  It would also use ovn_northd_lb_add_ls()
>>> for more switches at a time, so it will avoid some of the reallocations.
>>>
>>> We would have to change ovn_lb_group_add_ls() to work with arrays of
>>> datapaths but that shouldn't be a big problem.  What do you think?
>>
>> it seems doable to me, I will work on it.
>>
>>>
>>>>                  continue;
>>>>              }
>>>>  
>>>>              struct ovn_datapath *peer_od = op->peer->od;
>>>>              for (size_t i = 0; i < peer_od->nbr->n_load_balancer; i++) {
>>>> -                bool installed = false;
>>>>                  const struct uuid *lb_uuid =
>>>>                      &peer_od->nbr->load_balancer[i]->header_.uuid;
>>>>                  struct ovn_northd_lb *lb = ovn_northd_lb_find(lbs, lb_uuid);
>>>> @@ -4099,19 +4096,31 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
>>>>                      continue;
>>>>                  }
>>>>  
>>>> -                for (size_t j = 0; j < lb->n_nb_ls; j++) {
>>>> -                   if (lb->nb_ls[j] == od) {
>>>> -                       installed = true;
>>>> -                       break;
>>>> -                   }
>>>> -                }
>>>> -                if (!installed) {
>>>> +                if (sset_add(&lb->ods, od->nbs->name)) {
>>>
>>> Also, I don't think we really need the sset() or to avoid duplicates in
>>> general.  Duplicates don't really hurt and, in a real deployment, we
>>> would expect the CMS to only configure the load balancer in one place
>>> (on the LR or on the LB group associated to the LR).
>>
>> I think we need it since (at least theoretically) we can have the same lb
>> applied to two logical routers connected to the same logical switch. Am I
>> missing something?
>>
> 
> You're right, it's possible although I don't think it's too common.  In
> the worst case we will just duplicate some lflows.  I think that's fine.
> 
> What do you think?

Sorry for the interruption; I do not have an opinion on whether we need to check
for duplicates or not (I didn't review the code deeply enough), but if we
do, we should use bitmaps instead of sset here.  sset is very inefficient
for this purpose, consuming a lot of memory and CPU resources.  All the
'od's have an 'index' field which is a unique and sequential index that
can be used in a 'n_datapaths'-sized bitmap.

> 
> Thanks,
> Dumitru
> 
>> Regards,
>> Lorenzo
>>
>>>
>>> Regards,
>>> Dumitru
>>>
>>>>                      ovn_northd_lb_add_ls(lb, 1, &od);
>>>>                  }
>>>> -                if (lb->nlb) {
>>>> -                    od->has_lb_vip |= lb_has_vip(lb->nlb);
>>>> +            }
>>>> +
>>>> +            for (size_t i = 0; i < peer_od->nbr->n_load_balancer_group; i++) {
>>>> +                const struct nbrec_load_balancer_group *nbrec_lb_group;
>>>> +                struct ovn_lb_group *lb_group;
>>>> +
>>>> +                nbrec_lb_group = peer_od->nbr->load_balancer_group[i];
>>>> +                lb_group = ovn_lb_group_find(lb_groups,
>>>> +                                             &nbrec_lb_group->header_.uuid);
>>>> +                if (!lb_group) {
>>>> +                    continue;
>>>> +                }
>>>> +
>>>> +                if (sset_add(&lb_group->ods, od->nbs->name)) {

lb_group->ods should also be a bitmap.

Best regards, Ilya Maximets.

>>>> +                    ovn_lb_group_add_ls(lb_group, od);
>>>> +                    for (size_t j = 0; j < lb_group->n_lbs; j++) {
>>>> +                        ovn_northd_lb_add_ls(lb_group->lbs[j], 1, &od);
>>>> +                    }
>>>>                  }
>>>>              }
>>>> +
>>>> +            od->has_lb_vip = ls_has_lb_vip(od);
>>>>          }
>>>>      }
>>>>  }
>>>> @@ -4129,7 +4138,7 @@ build_lb_port_related_data(struct hmap *datapaths, struct hmap *ports,
>>>>      build_lrouter_lbs_check(datapaths);
>>>>      build_lrouter_lbs_reachable_ips(datapaths, lbs, lb_groups);
>>>>      build_lb_svcs(input_data, ovnsb_txn, ports, lbs);
>>>> -    build_lswitch_lbs_from_lrouter(datapaths, lbs);
>>>> +    build_lswitch_lbs_from_lrouter(datapaths, lbs, lb_groups);
>>>>  }
>>>>  
>>>>  
>>>> diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
>>>> index d5136ac6d..e9bb22d9e 100644
>>>> --- a/tests/ovn-northd.at
>>>> +++ b/tests/ovn-northd.at
>>>> @@ -7730,6 +7730,12 @@ ovn-nbctl lsp-set-options S1-R1 router-port=R1-S1
>>>>  ovn-nbctl lb-add lb0 172.16.0.10:80 10.0.0.2:80
>>>>  ovn-nbctl lr-lb-add R1 lb0
>>>>  
>>>> +ovn-nbctl lb-add lb1 172.16.0.11:8080 10.0.0.2:8080
>>>> +lb1_uuid=$(fetch_column nb:load_balancer _uuid name=lb1)
>>>> +lbg=$(ovn-nbctl create load_balancer_group name=lbg -- \
>>>> +    add load_balancer_group lbg load_balancer $lb1_uuid)
>>>> +ovn-nbctl add logical_router R1 load_balancer_group $lbg
>>>> +
>>>>  ovn-sbctl dump-flows S0 > S0flows
>>>>  ovn-sbctl dump-flows S1 > S1flows
>>>>  
>>>> @@ -7754,10 +7760,12 @@ AT_CAPTURE_FILE([S1flows])
>>>>  AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
>>>>    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
>>>>    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
>>>> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
>>>>  ])
>>>>  AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
>>>>    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
>>>>    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
>>>> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
>>>>  ])
>>>>  
>>>>  ovn-sbctl get datapath S0 _uuid > dp_uuids
>>>
>
Lorenzo Bianconi Oct. 19, 2022, 9:56 p.m. UTC | #6
> On 10/19/22 23:00, Lorenzo Bianconi wrote:
> >> On 9/14/22 14:42, Lorenzo Bianconi wrote:
> >>> Similar to single load balancers, add the capability to automatically
> >>> deploy a load-balancer group on each logical-switch connected to a
> >>> logical router where the load-balancer group has been installed by the
> >>> CMS.
> >>>
> >>> Reported-at: https://bugzilla.redhat.com/show_bug.cgi?id=2125310
> >>> Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi@redhat.com>
> >>> ---
> >>
> >> Hi Lorenzo,
> > 
> > Hi Dumitru,
> > 
> > thx for the review.
> > 
> >>
> >>>  lib/lb.c            |  5 +++++
> >>>  lib/lb.h            |  3 +++
> >>>  northd/northd.c     | 41 +++++++++++++++++++++++++----------------
> >>>  tests/ovn-northd.at |  8 ++++++++
> >>>  4 files changed, 41 insertions(+), 16 deletions(-)
> >>>
> >>> diff --git a/lib/lb.c b/lib/lb.c
> >>> index fa1a66d82..895828a09 100644
> >>> --- a/lib/lb.c
> >>> +++ b/lib/lb.c
> >>> @@ -182,6 +182,7 @@ ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
> >>>                                           : LB_NEIGH_RESPOND_ALL;
> >>>      sset_init(&lb->ips_v4);
> >>>      sset_init(&lb->ips_v6);
> >>> +    sset_init(&lb->ods);
> >>>      struct smap_node *node;
> >>>      size_t n_vips = 0;
> >>>  
> >>> @@ -280,6 +281,7 @@ ovn_northd_lb_destroy(struct ovn_northd_lb *lb)
> >>>      free(lb->vips_nb);
> >>>      sset_destroy(&lb->ips_v4);
> >>>      sset_destroy(&lb->ips_v6);
> >>> +    sset_destroy(&lb->ods);
> >>>      free(lb->selection_fields);
> >>>      free(lb->nb_lr);
> >>>      free(lb->nb_ls);
> >>> @@ -304,6 +306,8 @@ ovn_lb_group_create(const struct nbrec_load_balancer_group *nbrec_lb_group,
> >>>      lb_group->ls = xmalloc(max_datapaths * sizeof *lb_group->ls);
> >>>      lb_group->lr = xmalloc(max_datapaths * sizeof *lb_group->lr);
> >>>  
> >>> +    sset_init(&lb_group->ods);
> >>> +
> >>>      for (size_t i = 0; i < nbrec_lb_group->n_load_balancer; i++) {
> >>>          const struct uuid *lb_uuid =
> >>>              &nbrec_lb_group->load_balancer[i]->header_.uuid;
> >>> @@ -320,6 +324,7 @@ ovn_lb_group_destroy(struct ovn_lb_group *lb_group)
> >>>          return;
> >>>      }
> >>>  
> >>> +    sset_destroy(&lb_group->ods);
> >>>      free(lb_group->lbs);
> >>>      free(lb_group->ls);
> >>>      free(lb_group->lr);
> >>> diff --git a/lib/lb.h b/lib/lb.h
> >>> index e0b153fb3..58530e222 100644
> >>> --- a/lib/lb.h
> >>> +++ b/lib/lb.h
> >>> @@ -55,6 +55,7 @@ struct ovn_northd_lb {
> >>>  
> >>>      struct sset ips_v4;
> >>>      struct sset ips_v6;
> >>> +    struct sset ods;
> >>>  
> >>>      size_t n_nb_ls;
> >>>      size_t n_allocated_nb_ls;
> >>> @@ -118,6 +119,8 @@ struct ovn_lb_group {
> >>>      struct ovn_datapath **ls;
> >>>      size_t n_lr;
> >>>      struct ovn_datapath **lr;
> >>> +
> >>> +    struct sset ods;
> >>>  };
> >>>  
> >>>  struct ovn_lb_group *ovn_lb_group_create(
> >>> diff --git a/northd/northd.c b/northd/northd.c
> >>> index 346c0c7c8..830a6fa48 100644
> >>> --- a/northd/northd.c
> >>> +++ b/northd/northd.c
> >>> @@ -4068,7 +4068,8 @@ build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs,
> >>>  }
> >>>  
> >>>  static void
> >>> -build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
> >>> +build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs,
> >>> +                               struct hmap *lb_groups)
> >>>  {
> >>>      if (!install_ls_lb_from_router) {
> >>>          return;
> >>> @@ -4082,16 +4083,12 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
> >>>  
> >>>          struct ovn_port *op;
> >>>          LIST_FOR_EACH (op, dp_node, &od->port_list) {
> >>> -            if (!lsp_is_router(op->nbsp)) {
> >>> -                continue;
> >>> -            }
> >>> -            if (!op->peer) {
> >>> +            if (!lsp_is_router(op->nbsp) || !op->peer) {
> >>
> >> It's not introduced by this patch but we're touching this so we might as
> >> well change this part too:  there's no need to iterate through all ports
> >> of the switch.  We already have ovn_datapath->router_ports.
> >>
> >> While we're at it, I'm confused about why we don't do all this the other
> >> way around:
> >>
> >> First, for each router build the array of peer switches (we have all the
> >> info in ovn_datapath_add_router_port()).
> >>
> >> Then for each LB, for each router the LB is applied on, call
> >> ovn_northd_lb_add_ls(lb, rtr->n_peer_switches, rtr->peer_switches).
> >>
> >> Then for every LB group, for each router the LB group is applied on:
> >>
> >> - call ovn_lb_group_add_ls(lb_group, rtr->n_peer_switches,
> >> rtr->peer_switches)
> >> - for every LB in the group call ovn_northd_lb_add_ls(lb,
> >> rtr->n_peer_switches, rtr->peer_switches)
> >>
> >> I think this should save a bunch of ovn_northd_lb_find() and
> >> ovn_lb_group_find() calls.  It would also use ovn_northd_lb_add_ls()
> >> for more switches at a time, so it will avoid some of the reallocations.
> >>
> >> We would have to change ovn_lb_group_add_ls() to work with arrays of
> >> datapaths but that shouldn't be a big problem.  What do you think?
> > 
> > it seems doable to me, I will work on it.
> > 
> >>
> >>>                  continue;
> >>>              }
> >>>  
> >>>              struct ovn_datapath *peer_od = op->peer->od;
> >>>              for (size_t i = 0; i < peer_od->nbr->n_load_balancer; i++) {
> >>> -                bool installed = false;
> >>>                  const struct uuid *lb_uuid =
> >>>                      &peer_od->nbr->load_balancer[i]->header_.uuid;
> >>>                  struct ovn_northd_lb *lb = ovn_northd_lb_find(lbs, lb_uuid);
> >>> @@ -4099,19 +4096,31 @@ build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
> >>>                      continue;
> >>>                  }
> >>>  
> >>> -                for (size_t j = 0; j < lb->n_nb_ls; j++) {
> >>> -                   if (lb->nb_ls[j] == od) {
> >>> -                       installed = true;
> >>> -                       break;
> >>> -                   }
> >>> -                }
> >>> -                if (!installed) {
> >>> +                if (sset_add(&lb->ods, od->nbs->name)) {
> >>
> >> Also, I don't think we really need the sset() or to avoid duplicates in
> >> general.  Duplicates don't really hurt and, in a real deployment, we
> >> would expect the CMS to only configure the load balancer in one place
> >> (on the LR or on the LB group associated to the LR).
> > 
> > I think we need it since (at least theoretically) we can have the same lb
> > applied to two logical routers connected to the same logical switch. Am I
> > missing something?
> > 
> 
> You're right, it's possible although I don't think it's too common.  In
> the worst case we will just duplicate some lflows.  I think that's fine.

ack, fine with me. Not considering this case allows us to add logical switches
in bulk for the lb_group; otherwise we would need to loop over them to look for
duplicates.

Regards,
Lorenzo

> 
> What do you think?
> 
> Thanks,
> Dumitru
> 
> > Regards,
> > Lorenzo
> > 
> >>
> >> Regards,
> >> Dumitru
> >>
> >>>                      ovn_northd_lb_add_ls(lb, 1, &od);
> >>>                  }
> >>> -                if (lb->nlb) {
> >>> -                    od->has_lb_vip |= lb_has_vip(lb->nlb);
> >>> +            }
> >>> +
> >>> +            for (size_t i = 0; i < peer_od->nbr->n_load_balancer_group; i++) {
> >>> +                const struct nbrec_load_balancer_group *nbrec_lb_group;
> >>> +                struct ovn_lb_group *lb_group;
> >>> +
> >>> +                nbrec_lb_group = peer_od->nbr->load_balancer_group[i];
> >>> +                lb_group = ovn_lb_group_find(lb_groups,
> >>> +                                             &nbrec_lb_group->header_.uuid);
> >>> +                if (!lb_group) {
> >>> +                    continue;
> >>> +                }
> >>> +
> >>> +                if (sset_add(&lb_group->ods, od->nbs->name)) {
> >>> +                    ovn_lb_group_add_ls(lb_group, od);
> >>> +                    for (size_t j = 0; j < lb_group->n_lbs; j++) {
> >>> +                        ovn_northd_lb_add_ls(lb_group->lbs[j], 1, &od);
> >>> +                    }
> >>>                  }
> >>>              }
> >>> +
> >>> +            od->has_lb_vip = ls_has_lb_vip(od);
> >>>          }
> >>>      }
> >>>  }
> >>> @@ -4129,7 +4138,7 @@ build_lb_port_related_data(struct hmap *datapaths, struct hmap *ports,
> >>>      build_lrouter_lbs_check(datapaths);
> >>>      build_lrouter_lbs_reachable_ips(datapaths, lbs, lb_groups);
> >>>      build_lb_svcs(input_data, ovnsb_txn, ports, lbs);
> >>> -    build_lswitch_lbs_from_lrouter(datapaths, lbs);
> >>> +    build_lswitch_lbs_from_lrouter(datapaths, lbs, lb_groups);
> >>>  }
> >>>  
> >>>  
> >>> diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
> >>> index d5136ac6d..e9bb22d9e 100644
> >>> --- a/tests/ovn-northd.at
> >>> +++ b/tests/ovn-northd.at
> >>> @@ -7730,6 +7730,12 @@ ovn-nbctl lsp-set-options S1-R1 router-port=R1-S1
> >>>  ovn-nbctl lb-add lb0 172.16.0.10:80 10.0.0.2:80
> >>>  ovn-nbctl lr-lb-add R1 lb0
> >>>  
> >>> +ovn-nbctl lb-add lb1 172.16.0.11:8080 10.0.0.2:8080
> >>> +lb1_uuid=$(fetch_column nb:load_balancer _uuid name=lb1)
> >>> +lbg=$(ovn-nbctl create load_balancer_group name=lbg -- \
> >>> +    add load_balancer_group lbg load_balancer $lb1_uuid)
> >>> +ovn-nbctl add logical_router R1 load_balancer_group $lbg
> >>> +
> >>>  ovn-sbctl dump-flows S0 > S0flows
> >>>  ovn-sbctl dump-flows S1 > S1flows
> >>>  
> >>> @@ -7754,10 +7760,12 @@ AT_CAPTURE_FILE([S1flows])
> >>>  AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
> >>>    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
> >>>    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
> >>> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
> >>>  ])
> >>>  AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
> >>>    table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
> >>>    table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
> >>> +  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
> >>>  ])
> >>>  
> >>>  ovn-sbctl get datapath S0 _uuid > dp_uuids
> >>
>
diff mbox series

Patch

diff --git a/lib/lb.c b/lib/lb.c
index fa1a66d82..895828a09 100644
--- a/lib/lb.c
+++ b/lib/lb.c
@@ -182,6 +182,7 @@  ovn_northd_lb_create(const struct nbrec_load_balancer *nbrec_lb)
                                          : LB_NEIGH_RESPOND_ALL;
     sset_init(&lb->ips_v4);
     sset_init(&lb->ips_v6);
+    sset_init(&lb->ods);
     struct smap_node *node;
     size_t n_vips = 0;
 
@@ -280,6 +281,7 @@  ovn_northd_lb_destroy(struct ovn_northd_lb *lb)
     free(lb->vips_nb);
     sset_destroy(&lb->ips_v4);
     sset_destroy(&lb->ips_v6);
+    sset_destroy(&lb->ods);
     free(lb->selection_fields);
     free(lb->nb_lr);
     free(lb->nb_ls);
@@ -304,6 +306,8 @@  ovn_lb_group_create(const struct nbrec_load_balancer_group *nbrec_lb_group,
     lb_group->ls = xmalloc(max_datapaths * sizeof *lb_group->ls);
     lb_group->lr = xmalloc(max_datapaths * sizeof *lb_group->lr);
 
+    sset_init(&lb_group->ods);
+
     for (size_t i = 0; i < nbrec_lb_group->n_load_balancer; i++) {
         const struct uuid *lb_uuid =
             &nbrec_lb_group->load_balancer[i]->header_.uuid;
@@ -320,6 +324,7 @@  ovn_lb_group_destroy(struct ovn_lb_group *lb_group)
         return;
     }
 
+    sset_destroy(&lb_group->ods);
     free(lb_group->lbs);
     free(lb_group->ls);
     free(lb_group->lr);
diff --git a/lib/lb.h b/lib/lb.h
index e0b153fb3..58530e222 100644
--- a/lib/lb.h
+++ b/lib/lb.h
@@ -55,6 +55,7 @@  struct ovn_northd_lb {
 
     struct sset ips_v4;
     struct sset ips_v6;
+    struct sset ods;
 
     size_t n_nb_ls;
     size_t n_allocated_nb_ls;
@@ -118,6 +119,8 @@  struct ovn_lb_group {
     struct ovn_datapath **ls;
     size_t n_lr;
     struct ovn_datapath **lr;
+
+    struct sset ods;
 };
 
 struct ovn_lb_group *ovn_lb_group_create(
diff --git a/northd/northd.c b/northd/northd.c
index 346c0c7c8..830a6fa48 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -4068,7 +4068,8 @@  build_lrouter_lbs_reachable_ips(struct hmap *datapaths, struct hmap *lbs,
 }
 
 static void
-build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
+build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs,
+                               struct hmap *lb_groups)
 {
     if (!install_ls_lb_from_router) {
         return;
@@ -4082,16 +4083,12 @@  build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
 
         struct ovn_port *op;
         LIST_FOR_EACH (op, dp_node, &od->port_list) {
-            if (!lsp_is_router(op->nbsp)) {
-                continue;
-            }
-            if (!op->peer) {
+            if (!lsp_is_router(op->nbsp) || !op->peer) {
                 continue;
             }
 
             struct ovn_datapath *peer_od = op->peer->od;
             for (size_t i = 0; i < peer_od->nbr->n_load_balancer; i++) {
-                bool installed = false;
                 const struct uuid *lb_uuid =
                     &peer_od->nbr->load_balancer[i]->header_.uuid;
                 struct ovn_northd_lb *lb = ovn_northd_lb_find(lbs, lb_uuid);
@@ -4099,19 +4096,31 @@  build_lswitch_lbs_from_lrouter(struct hmap *datapaths, struct hmap *lbs)
                     continue;
                 }
 
-                for (size_t j = 0; j < lb->n_nb_ls; j++) {
-                   if (lb->nb_ls[j] == od) {
-                       installed = true;
-                       break;
-                   }
-                }
-                if (!installed) {
+                if (sset_add(&lb->ods, od->nbs->name)) {
                     ovn_northd_lb_add_ls(lb, 1, &od);
                 }
-                if (lb->nlb) {
-                    od->has_lb_vip |= lb_has_vip(lb->nlb);
+            }
+
+            for (size_t i = 0; i < peer_od->nbr->n_load_balancer_group; i++) {
+                const struct nbrec_load_balancer_group *nbrec_lb_group;
+                struct ovn_lb_group *lb_group;
+
+                nbrec_lb_group = peer_od->nbr->load_balancer_group[i];
+                lb_group = ovn_lb_group_find(lb_groups,
+                                             &nbrec_lb_group->header_.uuid);
+                if (!lb_group) {
+                    continue;
+                }
+
+                if (sset_add(&lb_group->ods, od->nbs->name)) {
+                    ovn_lb_group_add_ls(lb_group, od);
+                    for (size_t j = 0; j < lb_group->n_lbs; j++) {
+                        ovn_northd_lb_add_ls(lb_group->lbs[j], 1, &od);
+                    }
                 }
             }
+
+            od->has_lb_vip = ls_has_lb_vip(od);
         }
     }
 }
@@ -4129,7 +4138,7 @@  build_lb_port_related_data(struct hmap *datapaths, struct hmap *ports,
     build_lrouter_lbs_check(datapaths);
     build_lrouter_lbs_reachable_ips(datapaths, lbs, lb_groups);
     build_lb_svcs(input_data, ovnsb_txn, ports, lbs);
-    build_lswitch_lbs_from_lrouter(datapaths, lbs);
+    build_lswitch_lbs_from_lrouter(datapaths, lbs, lb_groups);
 }
 
 
diff --git a/tests/ovn-northd.at b/tests/ovn-northd.at
index d5136ac6d..e9bb22d9e 100644
--- a/tests/ovn-northd.at
+++ b/tests/ovn-northd.at
@@ -7730,6 +7730,12 @@  ovn-nbctl lsp-set-options S1-R1 router-port=R1-S1
 ovn-nbctl lb-add lb0 172.16.0.10:80 10.0.0.2:80
 ovn-nbctl lr-lb-add R1 lb0
 
+ovn-nbctl lb-add lb1 172.16.0.11:8080 10.0.0.2:8080
+lb1_uuid=$(fetch_column nb:load_balancer _uuid name=lb1)
+lbg=$(ovn-nbctl create load_balancer_group name=lbg -- \
+    add load_balancer_group lbg load_balancer $lb1_uuid)
+ovn-nbctl add logical_router R1 load_balancer_group $lbg
+
 ovn-sbctl dump-flows S0 > S0flows
 ovn-sbctl dump-flows S1 > S1flows
 
@@ -7754,10 +7760,12 @@  AT_CAPTURE_FILE([S1flows])
 AT_CHECK([grep "ls_in_lb" S0flows | sort], [0], [dnl
   table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
   table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
+  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
 ])
 AT_CHECK([grep "ls_in_lb" S1flows | sort], [0], [dnl
   table=11(ls_in_lb           ), priority=0    , match=(1), action=(next;)
   table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.10 && tcp.dst == 80), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:80);)
+  table=11(ls_in_lb           ), priority=120  , match=(ct.new && ip4.dst == 172.16.0.11 && tcp.dst == 8080), action=(reg0[[1]] = 0; ct_lb_mark(backends=10.0.0.2:8080);)
 ])
 
 ovn-sbctl get datapath S0 _uuid > dp_uuids