
[ovs-dev,1/3] northd.c: Generate and maintain SB lflow uuid in ovn_lflow.

Message ID 20230618061755.3962521-2-hzhou@ovn.org
State Accepted
Series ovn-northd incremental processing for VIF updates and deletions end-to-end.

Checks

Context                                  Check    Description
ovsrobot/apply-robot                     success  apply and check: success
ovsrobot/github-robot-_Build_and_Test    success  github build: passed
ovsrobot/github-robot-_ovn-kubernetes    fail     github build: failed

Commit Message

Han Zhou June 18, 2023, 6:17 a.m. UTC
For incremental processing, we need to maintain SB lflow uuids in
northd. For this reason, we generate the row uuid when creating the
Logical_Flow record in the SB DB, rather than waiting for the SB DB to
populate it back.

Signed-off-by: Han Zhou <hzhou@ovn.org>
---
 northd/northd.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
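
Why keeping the uuid in northd helps: once struct ovn_lflow remembers the
uuid of the row it created, later incremental-processing handlers can address
that exact Logical_Flow row directly instead of waiting for the SB DB to hand
the uuid back.  A minimal sketch of how the stored uuid could be used,
assuming the IDL-generated sbrec_logical_flow_table_get_for_uuid() lookup
helper and an 'sb_lflow_table' variable (neither is part of this patch):

    /* Creation: northd chooses the uuid and asks the IDL to persist it. */
    lflow->sb_uuid = uuid_random();
    sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
                                                    &lflow->sb_uuid);

    /* Later, an incremental handler could find (and, e.g., delete) that
     * same row by the remembered uuid; 'sb_lflow_table' is assumed to be
     * the IDL table of Logical_Flow rows. */
    const struct sbrec_logical_flow *existing =
        sbrec_logical_flow_table_get_for_uuid(sb_lflow_table,
                                              &lflow->sb_uuid);
    if (existing) {
        sbrec_logical_flow_delete(existing);
    }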

Comments

Dumitru Ceara June 29, 2023, 11:57 a.m. UTC | #1
On 6/18/23 08:17, Han Zhou wrote:
> For incremental processing, we need to maintain SB lflow uuids in
> northd. For this reason, we generate the row uuid when creating the
> Logical_Flow record in SB DB, rather than waiting for SB DB to populate
> back.
> 
> Signed-off-by: Han Zhou <hzhou@ovn.org>
> ---
>  northd/northd.c | 12 ++++++++++--
>  1 file changed, 10 insertions(+), 2 deletions(-)
> 
> diff --git a/northd/northd.c b/northd/northd.c
> index a45c8b53ad4e..98f528f93cfc 100644
> --- a/northd/northd.c
> +++ b/northd/northd.c
> @@ -5592,6 +5592,8 @@ struct ovn_lflow {
>                                    * 'dpg_bitmap'. */
>      struct ovn_dp_group *dpg;    /* Link to unique Sb datapath group. */
>      const char *where;
> +
> +    struct uuid sb_uuid;            /* SB DB row uuid, specified by northd. */

Nit: can you please align this comment with the ones above?

>  };
>  
>  static void ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow *lflow);
> @@ -5649,6 +5651,7 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
>      lflow->ctrl_meter = ctrl_meter;
>      lflow->dpg = NULL;
>      lflow->where = where;
> +    lflow->sb_uuid = UUID_ZERO;
>  }
>  
>  /* The lflow_hash_lock is a mutex array that protects updates to the shared
> @@ -16020,6 +16023,7 @@ void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
>              size_t n_datapaths;
>              bool is_switch;
>  
> +            lflow->sb_uuid = sbflow->header_.uuid;
>              is_switch = ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH;
>              if (is_switch) {
>                  n_datapaths = ods_size(input_data->ls_datapaths);
> @@ -16095,7 +16099,9 @@ void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
>              dp_groups = &lr_dp_groups;
>          }
>  
> -        sbflow = sbrec_logical_flow_insert(ovnsb_txn);
> +        lflow->sb_uuid = uuid_random();

If we ever decide to parallelize the lflow synchronization code this
will hurt performance.  Until then we're fine I guess.

> +        sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
> +                                                        &lflow->sb_uuid);
>          if (lflow->od) {
>              sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
>          } else {
> @@ -16305,7 +16311,9 @@ bool lflow_handle_northd_ls_changes(struct ovsdb_idl_txn *ovnsb_txn,
>  
>              /* Sync to SB. */
>              const struct sbrec_logical_flow *sbflow;
> -            sbflow = sbrec_logical_flow_insert(ovnsb_txn);
> +            lflow->sb_uuid = uuid_random();
> +            sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
> +                                                            &lflow->sb_uuid);
>              const char *pipeline = ovn_stage_get_pipeline_name(lflow->stage);
>              uint8_t table = ovn_stage_get_table(lflow->stage);
>              sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);

With the small nit about the comment indentation addressed:

Acked-by: Dumitru Ceara <dceara@redhat.com>
Han Zhou June 30, 2023, 1:31 a.m. UTC | #2
On Thu, Jun 29, 2023 at 4:57 AM Dumitru Ceara <dceara@redhat.com> wrote:
>
> On 6/18/23 08:17, Han Zhou wrote:
> > For incremental processing, we need to maintain SB lflow uuids in
> > northd. For this reason, we generate the row uuid when creating the
> > Logical_Flow record in SB DB, rather than waiting for SB DB to populate
> > back.
> >
> > Signed-off-by: Han Zhou <hzhou@ovn.org>
> > ---
> >  northd/northd.c | 12 ++++++++++--
> >  1 file changed, 10 insertions(+), 2 deletions(-)
> >
> > diff --git a/northd/northd.c b/northd/northd.c
> > index a45c8b53ad4e..98f528f93cfc 100644
> > --- a/northd/northd.c
> > +++ b/northd/northd.c
> > @@ -5592,6 +5592,8 @@ struct ovn_lflow {
> >                                    * 'dpg_bitmap'. */
> >      struct ovn_dp_group *dpg;    /* Link to unique Sb datapath group.
*/
> >      const char *where;
> > +
> > +    struct uuid sb_uuid;            /* SB DB row uuid, specified by
northd. */
>
> Nit: can you please align this comment with the ones above?
>
> >  };
> >
> >  static void ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow
*lflow);
> > @@ -5649,6 +5651,7 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct
ovn_datapath *od,
> >      lflow->ctrl_meter = ctrl_meter;
> >      lflow->dpg = NULL;
> >      lflow->where = where;
> > +    lflow->sb_uuid = UUID_ZERO;
> >  }
> >
> >  /* The lflow_hash_lock is a mutex array that protects updates to the
shared
> > @@ -16020,6 +16023,7 @@ void build_lflows(struct ovsdb_idl_txn
*ovnsb_txn,
> >              size_t n_datapaths;
> >              bool is_switch;
> >
> > +            lflow->sb_uuid = sbflow->header_.uuid;
> >              is_switch = ovn_stage_to_datapath_type(lflow->stage) ==
DP_SWITCH;
> >              if (is_switch) {
> >                  n_datapaths = ods_size(input_data->ls_datapaths);
> > @@ -16095,7 +16099,9 @@ void build_lflows(struct ovsdb_idl_txn
*ovnsb_txn,
> >              dp_groups = &lr_dp_groups;
> >          }
> >
> > -        sbflow = sbrec_logical_flow_insert(ovnsb_txn);
> > +        lflow->sb_uuid = uuid_random();
>
> If we ever decide to parallelize the lflow synchronization code this
> will hurt performance.  Until then we're fine I guess.

Good point! I will add a comment to remind us before merging this.

>
> > +        sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
> > +
 &lflow->sb_uuid);
> >          if (lflow->od) {
> >              sbrec_logical_flow_set_logical_datapath(sbflow,
lflow->od->sb);
> >          } else {
> > @@ -16305,7 +16311,9 @@ bool lflow_handle_northd_ls_changes(struct
ovsdb_idl_txn *ovnsb_txn,
> >
> >              /* Sync to SB. */
> >              const struct sbrec_logical_flow *sbflow;
> > -            sbflow = sbrec_logical_flow_insert(ovnsb_txn);
> > +            lflow->sb_uuid = uuid_random();
> > +            sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
> > +
 &lflow->sb_uuid);
> >              const char *pipeline =
ovn_stage_get_pipeline_name(lflow->stage);
> >              uint8_t table = ovn_stage_get_table(lflow->stage);
> >              sbrec_logical_flow_set_logical_datapath(sbflow,
lflow->od->sb);
>
> With the small nit about the comment indentation addressed:
>
> Acked-by: Dumitru Ceara <dceara@redhat.com>
>
Numan Siddique June 30, 2023, 5:06 a.m. UTC | #3
On Fri, Jun 30, 2023 at 7:01 AM Han Zhou <hzhou@ovn.org> wrote:
>
> On Thu, Jun 29, 2023 at 4:57 AM Dumitru Ceara <dceara@redhat.com> wrote:
> >
> > On 6/18/23 08:17, Han Zhou wrote:
> > > For incremental processing, we need to maintain SB lflow uuids in
> > > northd. For this reason, we generate the row uuid when creating the
> > > Logical_Flow record in SB DB, rather than waiting for SB DB to populate
> > > back.
> > >
> > > Signed-off-by: Han Zhou <hzhou@ovn.org>
> > > ---
> > >  northd/northd.c | 12 ++++++++++--
> > >  1 file changed, 10 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/northd/northd.c b/northd/northd.c
> > > index a45c8b53ad4e..98f528f93cfc 100644
> > > --- a/northd/northd.c
> > > +++ b/northd/northd.c
> > > @@ -5592,6 +5592,8 @@ struct ovn_lflow {
> > >                                    * 'dpg_bitmap'. */
> > >      struct ovn_dp_group *dpg;    /* Link to unique Sb datapath group.
> */
> > >      const char *where;
> > > +
> > > +    struct uuid sb_uuid;            /* SB DB row uuid, specified by
> northd. */
> >
> > Nit: can you please align this comment with the ones above?
> >
> > >  };
> > >
> > >  static void ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow
> *lflow);
> > > @@ -5649,6 +5651,7 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct
> ovn_datapath *od,
> > >      lflow->ctrl_meter = ctrl_meter;
> > >      lflow->dpg = NULL;
> > >      lflow->where = where;
> > > +    lflow->sb_uuid = UUID_ZERO;
> > >  }
> > >
> > >  /* The lflow_hash_lock is a mutex array that protects updates to the
> shared
> > > @@ -16020,6 +16023,7 @@ void build_lflows(struct ovsdb_idl_txn
> *ovnsb_txn,
> > >              size_t n_datapaths;
> > >              bool is_switch;
> > >
> > > +            lflow->sb_uuid = sbflow->header_.uuid;
> > >              is_switch = ovn_stage_to_datapath_type(lflow->stage) ==
> DP_SWITCH;
> > >              if (is_switch) {
> > >                  n_datapaths = ods_size(input_data->ls_datapaths);
> > > @@ -16095,7 +16099,9 @@ void build_lflows(struct ovsdb_idl_txn
> *ovnsb_txn,
> > >              dp_groups = &lr_dp_groups;
> > >          }
> > >
> > > -        sbflow = sbrec_logical_flow_insert(ovnsb_txn);
> > > +        lflow->sb_uuid = uuid_random();
> >
> > If we ever decide to parallelize the lflow synchronization code this
> > will hurt performance.  Until then we're fine I guess.
>
> Good point! I will add a comment to remind us before merging this.

Interesting. Only after looking into the uuid_random() code can one
figure that it acquires a lock.

>
> >
> > > +        sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
> > > +
>  &lflow->sb_uuid);
> > >          if (lflow->od) {
> > >              sbrec_logical_flow_set_logical_datapath(sbflow,
> lflow->od->sb);
> > >          } else {
> > > @@ -16305,7 +16311,9 @@ bool lflow_handle_northd_ls_changes(struct
> ovsdb_idl_txn *ovnsb_txn,
> > >
> > >              /* Sync to SB. */
> > >              const struct sbrec_logical_flow *sbflow;
> > > -            sbflow = sbrec_logical_flow_insert(ovnsb_txn);
> > > +            lflow->sb_uuid = uuid_random();
> > > +            sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
> > > +
>  &lflow->sb_uuid);
> > >              const char *pipeline =
> ovn_stage_get_pipeline_name(lflow->stage);
> > >              uint8_t table = ovn_stage_get_table(lflow->stage);
> > >              sbrec_logical_flow_set_logical_datapath(sbflow,
> lflow->od->sb);
> >
> > With the small nit about the comment indentation addressed:
> >
> > Acked-by: Dumitru Ceara <dceara@redhat.com>

Acked-by: Numan Siddique <numans@ovn.org>

Numan

> >
> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
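
On the parallelization concern raised above: uuid_random() acquires an
internal lock, so calling it from parallel lflow-sync workers would add
contention.  A hypothetical mitigation, not part of this series, would be to
assign the uuids in a cheap serial pass before handing the lflows to worker
threads.  The helper below and the 'hmap_node' member name are assumed for
illustration only:

    /* Serial pre-pass: pick uuids for every not-yet-synced lflow so that
     * worker threads never need to call uuid_random() themselves. */
    static void
    assign_all_sb_uuids(struct hmap *lflows)
    {
        struct ovn_lflow *lflow;
        HMAP_FOR_EACH (lflow, hmap_node, lflows) {
            if (uuid_is_zero(&lflow->sb_uuid)) {
                lflow->sb_uuid = uuid_random();
            }
        }
    }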
Han Zhou July 1, 2023, 12:59 a.m. UTC | #4
On Thu, Jun 29, 2023 at 10:06 PM Numan Siddique <numans@ovn.org> wrote:
>
> On Fri, Jun 30, 2023 at 7:01 AM Han Zhou <hzhou@ovn.org> wrote:
> >
> > On Thu, Jun 29, 2023 at 4:57 AM Dumitru Ceara <dceara@redhat.com> wrote:
> > >
> > > On 6/18/23 08:17, Han Zhou wrote:
> > > > For incremental processing, we need to maintain SB lflow uuids in
> > > > northd. For this reason, we generate the row uuid when creating the
> > > > Logical_Flow record in SB DB, rather than waiting for SB DB to
populate
> > > > back.
> > > >
> > > > Signed-off-by: Han Zhou <hzhou@ovn.org>
> > > > ---
> > > >  northd/northd.c | 12 ++++++++++--
> > > >  1 file changed, 10 insertions(+), 2 deletions(-)
> > > >
> > > > diff --git a/northd/northd.c b/northd/northd.c
> > > > index a45c8b53ad4e..98f528f93cfc 100644
> > > > --- a/northd/northd.c
> > > > +++ b/northd/northd.c
> > > > @@ -5592,6 +5592,8 @@ struct ovn_lflow {
> > > >                                    * 'dpg_bitmap'. */
> > > >      struct ovn_dp_group *dpg;    /* Link to unique Sb datapath
group.
> > */
> > > >      const char *where;
> > > > +
> > > > +    struct uuid sb_uuid;            /* SB DB row uuid, specified by
> > northd. */
> > >
> > > Nit: can you please align this comment with the ones above?
> > >
> > > >  };
> > > >
> > > >  static void ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow
> > *lflow);
> > > > @@ -5649,6 +5651,7 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct
> > ovn_datapath *od,
> > > >      lflow->ctrl_meter = ctrl_meter;
> > > >      lflow->dpg = NULL;
> > > >      lflow->where = where;
> > > > +    lflow->sb_uuid = UUID_ZERO;
> > > >  }
> > > >
> > > >  /* The lflow_hash_lock is a mutex array that protects updates to
the
> > shared
> > > > @@ -16020,6 +16023,7 @@ void build_lflows(struct ovsdb_idl_txn
> > *ovnsb_txn,
> > > >              size_t n_datapaths;
> > > >              bool is_switch;
> > > >
> > > > +            lflow->sb_uuid = sbflow->header_.uuid;
> > > >              is_switch = ovn_stage_to_datapath_type(lflow->stage) ==
> > DP_SWITCH;
> > > >              if (is_switch) {
> > > >                  n_datapaths = ods_size(input_data->ls_datapaths);
> > > > @@ -16095,7 +16099,9 @@ void build_lflows(struct ovsdb_idl_txn
> > *ovnsb_txn,
> > > >              dp_groups = &lr_dp_groups;
> > > >          }
> > > >
> > > > -        sbflow = sbrec_logical_flow_insert(ovnsb_txn);
> > > > +        lflow->sb_uuid = uuid_random();
> > >
> > > If we ever decide to parallelize the lflow synchronization code this
> > > will hurt performance.  Until then we're fine I guess.
> >
> > Good point! I will add a comment to remind us before merging this.
>
> Interesting. Only after looking into the uuid_random() code can one
> figure that it acquires a lock.
>
> >
> > >
> > > > +        sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
> > > > +
> >  &lflow->sb_uuid);
> > > >          if (lflow->od) {
> > > >              sbrec_logical_flow_set_logical_datapath(sbflow,
> > lflow->od->sb);
> > > >          } else {
> > > > @@ -16305,7 +16311,9 @@ bool lflow_handle_northd_ls_changes(struct
> > ovsdb_idl_txn *ovnsb_txn,
> > > >
> > > >              /* Sync to SB. */
> > > >              const struct sbrec_logical_flow *sbflow;
> > > > -            sbflow = sbrec_logical_flow_insert(ovnsb_txn);
> > > > +            lflow->sb_uuid = uuid_random();
> > > > +            sbflow =
sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
> > > > +
> >  &lflow->sb_uuid);
> > > >              const char *pipeline =
> > ovn_stage_get_pipeline_name(lflow->stage);
> > > >              uint8_t table = ovn_stage_get_table(lflow->stage);
> > > >              sbrec_logical_flow_set_logical_datapath(sbflow,
> > lflow->od->sb);
> > >
> > > With the small nit about the comment indentation addressed:
> > >
> > > Acked-by: Dumitru Ceara <dceara@redhat.com>
>
> Acked-by: Numan Siddique <numans@ovn.org>
>
> Numan

Thanks Dumitru and Numan. I applied this patch to main.
>
> > >
> > _______________________________________________
> > dev mailing list
> > dev@openvswitch.org
> > https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Patch

diff --git a/northd/northd.c b/northd/northd.c
index a45c8b53ad4e..98f528f93cfc 100644
--- a/northd/northd.c
+++ b/northd/northd.c
@@ -5592,6 +5592,8 @@  struct ovn_lflow {
                                   * 'dpg_bitmap'. */
     struct ovn_dp_group *dpg;    /* Link to unique Sb datapath group. */
     const char *where;
+
+    struct uuid sb_uuid;            /* SB DB row uuid, specified by northd. */
 };
 
 static void ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow *lflow);
@@ -5649,6 +5651,7 @@  ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
     lflow->ctrl_meter = ctrl_meter;
     lflow->dpg = NULL;
     lflow->where = where;
+    lflow->sb_uuid = UUID_ZERO;
 }
 
 /* The lflow_hash_lock is a mutex array that protects updates to the shared
@@ -16020,6 +16023,7 @@  void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
             size_t n_datapaths;
             bool is_switch;
 
+            lflow->sb_uuid = sbflow->header_.uuid;
             is_switch = ovn_stage_to_datapath_type(lflow->stage) == DP_SWITCH;
             if (is_switch) {
                 n_datapaths = ods_size(input_data->ls_datapaths);
@@ -16095,7 +16099,9 @@  void build_lflows(struct ovsdb_idl_txn *ovnsb_txn,
             dp_groups = &lr_dp_groups;
         }
 
-        sbflow = sbrec_logical_flow_insert(ovnsb_txn);
+        lflow->sb_uuid = uuid_random();
+        sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
+                                                        &lflow->sb_uuid);
         if (lflow->od) {
             sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
         } else {
@@ -16305,7 +16311,9 @@  bool lflow_handle_northd_ls_changes(struct ovsdb_idl_txn *ovnsb_txn,
 
             /* Sync to SB. */
             const struct sbrec_logical_flow *sbflow;
-            sbflow = sbrec_logical_flow_insert(ovnsb_txn);
+            lflow->sb_uuid = uuid_random();
+            sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
+                                                            &lflow->sb_uuid);
             const char *pipeline = ovn_stage_get_pipeline_name(lflow->stage);
             uint8_t table = ovn_stage_get_table(lflow->stage);
             sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
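
Taken together, the patch fills in lflow->sb_uuid through two paths; a
condensed, illustrative summary (surrounding code elided):

    /* (a) Full sync, the Logical_Flow row already exists in the SB DB:
     *     adopt the uuid the row already has. */
    lflow->sb_uuid = sbflow->header_.uuid;

    /* (b) The row does not exist yet (full sync or the incremental
     *     logical-switch handler): northd picks the uuid and tells the
     *     IDL to persist it when the row is inserted. */
    lflow->sb_uuid = uuid_random();
    sbflow = sbrec_logical_flow_insert_persist_uuid(ovnsb_txn,
                                                    &lflow->sb_uuid);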