[ovs-dev,2/7] ovsdb: Add extra internal tables to databases for replication purposes.

Message ID 20210501005548.3071269-3-i.maximets@ovn.org
State Superseded
Series OVSDB 2-Tier deployment.

Commit Message

Ilya Maximets May 1, 2021, 12:55 a.m. UTC
Add a new 'copyForReplication' flag to the ovsdb table schema.  It is
needed to enable replication of the _Server database in later commits.

With this option, ovsdb-server will create a new _synced_<table-name>
table where it will store data received from the replication source,
while keeping the original table for the local server's own data.
This way ovsdb-server will be able to keep the state of local databases
while replicating the state of databases from the active server.

Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
---
 Documentation/ref/ovsdb-server.7.rst |  7 +++++++
 ovsdb/ovsdb-doc                      |  3 ++-
 ovsdb/ovsdb.c                        | 19 ++++++++++++++++++-
 ovsdb/table.c                        | 20 ++++++++++++++++----
 ovsdb/table.h                        |  4 +++-
 python/ovs/db/schema.py              | 20 +++++++++++++++++---
 6 files changed, 63 insertions(+), 10 deletions(-)
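
A rough sketch of the resulting behavior, using the Python schema parser as
modified by this patch.  The 'mydb' schema and its 'Example' table below are
hypothetical and used only for illustration; only the patched
python/ovs/db/schema.py is assumed:

    # Sketch: parsing a schema that marks a table with "copyForReplication".
    # The "mydb" schema and the "Example" table are hypothetical.
    import ovs.db.schema

    schema_json = {
        "name": "mydb",
        "version": "1.0.0",
        "tables": {
            "Example": {
                "columns": {"value": {"type": "string"}},
                "copyForReplication": True,
            },
        },
    }

    schema = ovs.db.schema.DbSchema.from_json(schema_json)

    # The parser keeps the original table and adds an internal "_synced_"
    # copy with the flag cleared, so the copy itself is not copied again.
    print(sorted(schema.tables))                                  # ['Example', '_synced_Example']
    print(schema.tables["Example"].copy_for_replication)          # True
    print(schema.tables["_synced_Example"].copy_for_replication)  # False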

Comments

Dumitru Ceara May 10, 2021, 11:45 a.m. UTC | #1
On 5/1/21 2:55 AM, Ilya Maximets wrote:
> Add a new 'copyForReplication' flag to the ovsdb table schema.  It is
> needed to enable replication of the _Server database in later commits.
> 
> With this option, ovsdb-server will create a new _synced_<table-name>
> table where it will store data received from the replication source,
> while keeping the original table for the local server's own data.
> This way ovsdb-server will be able to keep the state of local databases
> while replicating the state of databases from the active server.
> 
> Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
> ---

One tiny nit, otherwise:

Acked-by: Dumitru Ceara <dceara@redhat.com>

Regards,
Dumitru

>  Documentation/ref/ovsdb-server.7.rst |  7 +++++++
>  ovsdb/ovsdb-doc                      |  3 ++-
>  ovsdb/ovsdb.c                        | 19 ++++++++++++++++++-
>  ovsdb/table.c                        | 20 ++++++++++++++++----
>  ovsdb/table.h                        |  4 +++-
>  python/ovs/db/schema.py              | 20 +++++++++++++++++---
>  6 files changed, 63 insertions(+), 10 deletions(-)
> 
> diff --git a/Documentation/ref/ovsdb-server.7.rst b/Documentation/ref/ovsdb-server.7.rst
> index 04414350a..717f62d81 100644
> --- a/Documentation/ref/ovsdb-server.7.rst
> +++ b/Documentation/ref/ovsdb-server.7.rst
> @@ -104,6 +104,13 @@ configuration and the following columns:
>      The set of columns and column:key pairs for which authorized update and
>      mutate operations should be permitted.
>  
> +Since version 2.16, a database table can be copied for replication purposes
> +by setting the ``copyForReplication`` flag to ``true``.  For each table marked
> +with this flag, ``ovsdb-server`` will create an additional table with the same
> +name prefixed by ``_synced_`` (i.e., ``_synced_<table-name>``).  A server in a
> +backup role will keep its own content in the original table and will put
> +data received from the active server into this special table.
> +
>  4 Wire Protocol
>  ---------------
>  
> diff --git a/ovsdb/ovsdb-doc b/ovsdb/ovsdb-doc
> index 10d0c0c13..5513783c1 100755
> --- a/ovsdb/ovsdb-doc
> +++ b/ovsdb/ovsdb-doc
> @@ -213,7 +213,8 @@ def docsToNroff(schemaFile, xmlFile, erFile, version=None):
>              introNodes += [dbNode]
>  
>      documented_tables = set((name for (name, title) in summary))
> -    schema_tables = set(schema.tables.keys())
> +    schema_tables = set([name for name in schema.tables.keys()
> +                         if not name.startswith("_")])
>      undocumented_tables = schema_tables - documented_tables
>      for table in undocumented_tables:
>          raise error.Error("undocumented table %s" % table)
> diff --git a/ovsdb/ovsdb.c b/ovsdb/ovsdb.c
> index 9042658fa..f662a0238 100644
> --- a/ovsdb/ovsdb.c
> +++ b/ovsdb/ovsdb.c
> @@ -238,6 +238,21 @@ ovsdb_schema_from_json(const struct json *json, struct ovsdb_schema **schemap)
>          }
>  
>          shash_add(&schema->tables, table->name, table);
> +
> +        if (table->copy_for_replication) {
> +            /* Need to create a copy of the table in case it will be
> +             * synced from another server. */
> +            struct ovsdb_table_schema *synced_table;
> +
> +            synced_table = ovsdb_table_schema_clone(table);
> +            free(synced_table->name);
> +            synced_table->name = xasprintf("_synced_%s", node->name);
> +            /* Clearing 'copy' flag to avoid accidental further copying. */
> +            synced_table->copy_for_replication = false;
> +
> +            shash_add(&schema->tables, synced_table->name, synced_table);
> +        }
> +
>      }
>  
>      /* "isRoot" was not part of the original schema definition.  Before it was
> @@ -308,8 +323,10 @@ ovsdb_schema_to_json(const struct ovsdb_schema *schema)
>  
>      SHASH_FOR_EACH (node, &schema->tables) {
>          struct ovsdb_table_schema *table = node->data;
> -        json_object_put(tables, table->name,
> +        if (node->name[0] != '_') {
> +            json_object_put(tables, table->name,
>                          ovsdb_table_schema_to_json(table, default_is_root));

Nit: indentation.

> +        }
>      }
>      json_object_put(json, "tables", tables);
>  
> diff --git a/ovsdb/table.c b/ovsdb/table.c
> index 6cd2d886d..b46946072 100644
> --- a/ovsdb/table.c
> +++ b/ovsdb/table.c
> @@ -36,7 +36,8 @@ add_column(struct ovsdb_table_schema *ts, struct ovsdb_column *column)
>  
>  struct ovsdb_table_schema *
>  ovsdb_table_schema_create(const char *name, bool mutable,
> -                          unsigned int max_rows, bool is_root)
> +                          unsigned int max_rows, bool is_root,
> +                          bool copy_for_replication)
>  {
>      struct ovsdb_column *uuid, *version;
>      struct ovsdb_table_schema *ts;
> @@ -47,6 +48,7 @@ ovsdb_table_schema_create(const char *name, bool mutable,
>      shash_init(&ts->columns);
>      ts->max_rows = max_rows;
>      ts->is_root = is_root;
> +    ts->copy_for_replication = copy_for_replication;
>  
>      uuid = ovsdb_column_create("_uuid", false, true, &ovsdb_type_uuid);
>      add_column(ts, uuid);
> @@ -70,7 +72,8 @@ ovsdb_table_schema_clone(const struct ovsdb_table_schema *old)
>      size_t i;
>  
>      new = ovsdb_table_schema_create(old->name, old->mutable,
> -                                    old->max_rows, old->is_root);
> +                                    old->max_rows, old->is_root,
> +                                    old->copy_for_replication);
>      SHASH_FOR_EACH (node, &old->columns) {
>          const struct ovsdb_column *column = node->data;
>  
> @@ -126,7 +129,8 @@ ovsdb_table_schema_from_json(const struct json *json, const char *name,
>                               struct ovsdb_table_schema **tsp)
>  {
>      struct ovsdb_table_schema *ts;
> -    const struct json *columns, *mutable, *max_rows, *is_root, *indexes;
> +    const struct json *columns, *mutable, *max_rows;
> +    const struct json *is_root, *indexes, *copy_for_replication;
>      struct shash_node *node;
>      struct ovsdb_parser parser;
>      struct ovsdb_error *error;
> @@ -141,6 +145,8 @@ ovsdb_table_schema_from_json(const struct json *json, const char *name,
>      max_rows = ovsdb_parser_member(&parser, "maxRows",
>                                     OP_INTEGER | OP_OPTIONAL);
>      is_root = ovsdb_parser_member(&parser, "isRoot", OP_BOOLEAN | OP_OPTIONAL);
> +    copy_for_replication = ovsdb_parser_member(&parser, "copyForReplication",
> +                                               OP_BOOLEAN | OP_OPTIONAL);
>      indexes = ovsdb_parser_member(&parser, "indexes", OP_ARRAY | OP_OPTIONAL);
>      error = ovsdb_parser_finish(&parser);
>      if (error) {
> @@ -165,7 +171,10 @@ ovsdb_table_schema_from_json(const struct json *json, const char *name,
>      ts = ovsdb_table_schema_create(name,
>                                     mutable ? json_boolean(mutable) : true,
>                                     MIN(n_max_rows, UINT_MAX),
> -                                   is_root ? json_boolean(is_root) : false);
> +                                   is_root ? json_boolean(is_root) : false,
> +                                   copy_for_replication
> +                                   ? json_boolean(copy_for_replication)
> +                                   : false);
>      SHASH_FOR_EACH (node, json_object(columns)) {
>          struct ovsdb_column *column;
>  
> @@ -249,6 +258,9 @@ ovsdb_table_schema_to_json(const struct ovsdb_table_schema *ts,
>      if (default_is_root != ts->is_root) {
>          json_object_put(json, "isRoot", json_boolean_create(ts->is_root));
>      }
> +    if (ts->copy_for_replication) {
> +        json_object_put(json, "copyForReplication", json_boolean_create(true));
> +    }
>  
>      columns = json_object_create();
>  
> diff --git a/ovsdb/table.h b/ovsdb/table.h
> index 69dd649df..afd56f7a6 100644
> --- a/ovsdb/table.h
> +++ b/ovsdb/table.h
> @@ -29,6 +29,7 @@ struct ovsdb_table_schema {
>      char *name;
>      bool mutable;
>      bool is_root;               /* Part of garbage collection root set? */
> +    bool copy_for_replication;  /* '_synced_' copy of the table needed? */
>      unsigned int max_rows;      /* Maximum number of rows. */
>      struct shash columns;       /* Contains "struct ovsdb_column *"s. */
>      struct ovsdb_column_set *indexes;
> @@ -36,7 +37,8 @@ struct ovsdb_table_schema {
>  };
>  
>  struct ovsdb_table_schema *ovsdb_table_schema_create(
> -    const char *name, bool mutable, unsigned int max_rows, bool is_root);
> +    const char *name, bool mutable, unsigned int max_rows,
> +    bool is_root, bool copy_for_replication);
>  struct ovsdb_table_schema *ovsdb_table_schema_clone(
>      const struct ovsdb_table_schema *);
>  void ovsdb_table_schema_destroy(struct ovsdb_table_schema *);
> diff --git a/python/ovs/db/schema.py b/python/ovs/db/schema.py
> index 3ba844ae5..e58e81080 100644
> --- a/python/ovs/db/schema.py
> +++ b/python/ovs/db/schema.py
> @@ -80,6 +80,13 @@ class DbSchema(object):
>              _check_id(tableName, json)
>              tables[tableName] = TableSchema.from_json(tableJson, tableName,
>                                                        allow_extensions)
> +            if tables[tableName].copy_for_replication:
> +                synced_table_name = "_synced_" + tableName
> +                synced_table = TableSchema.from_json(tableJson,
> +                                                     synced_table_name,
> +                                                     allow_extensions)
> +                synced_table.copy_for_replication = False
> +                tables[synced_table_name] = synced_table
>  
>          return DbSchema(name, version, tables)
>  
> @@ -92,7 +99,8 @@ class DbSchema(object):
>  
>          tables = {}
>          for table in self.tables.values():
> -            tables[table.name] = table.to_json(default_is_root)
> +            if not table.name.startswith("_"):
> +                tables[table.name] = table.to_json(default_is_root)
>          json = {"name": self.name, "tables": tables}
>          if self.version:
>              json["version"] = self.version
> @@ -172,7 +180,8 @@ def column_set_from_json(json, columns):
>  
>  class TableSchema(object):
>      def __init__(self, name, columns, mutable=True, max_rows=sys.maxsize,
> -                 is_root=True, indexes=[], extensions={}):
> +                 is_root=True, indexes=[], extensions={},
> +                 copy_for_replication=False):
>          self.name = name
>          self.columns = columns
>          self.mutable = mutable
> @@ -180,6 +189,7 @@ class TableSchema(object):
>          self.is_root = is_root
>          self.indexes = indexes
>          self.extensions = extensions
> +        self.copy_for_replication = copy_for_replication
>  
>      @staticmethod
>      def from_json(json, name, allow_extensions=False):
> @@ -188,6 +198,8 @@ class TableSchema(object):
>          mutable = parser.get_optional("mutable", [bool], True)
>          max_rows = parser.get_optional("maxRows", [int])
>          is_root = parser.get_optional("isRoot", [bool], False)
> +        copy_for_replication = parser.get_optional("copyForReplication",
> +                                                   [bool], False)
>          indexes_json = parser.get_optional("indexes", [list], [])
>          if allow_extensions:
>              extensions = parser.get_optional("extensions", [dict], {})
> @@ -224,7 +236,7 @@ class TableSchema(object):
>              indexes.append(index)
>  
>          return TableSchema(name, columns, mutable, max_rows, is_root, indexes,
> -                           extensions)
> +                           extensions, copy_for_replication)
>  
>      def to_json(self, default_is_root=False):
>          """Returns this table schema serialized into JSON.
> @@ -243,6 +255,8 @@ class TableSchema(object):
>              json["mutable"] = False
>          if default_is_root != self.is_root:
>              json["isRoot"] = self.is_root
> +        if self.copy_for_replication:
> +            json["copyForReplication"] = True
>  
>          json["columns"] = columns = {}
>          for column in self.columns.values():
>
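
A short companion sketch for the serialization side, assuming the same
hypothetical 'mydb' schema and the patched python/ovs/db/schema.py: internal
'_synced_' tables are skipped when the schema is converted back to JSON,
mirroring the ovsdb_schema_to_json() change above, while the
'copyForReplication' flag itself round-trips:

    # Sketch: "_synced_" tables stay internal; the flag round-trips.
    import ovs.db.schema

    schema_json = {
        "name": "mydb",
        "tables": {
            "Example": {
                "columns": {"value": {"type": "string"}},
                "copyForReplication": True,
            },
        },
    }

    schema = ovs.db.schema.DbSchema.from_json(schema_json)
    out = schema.to_json()

    print(sorted(out["tables"]))                           # ['Example'] -- no '_synced_Example'
    print(out["tables"]["Example"]["copyForReplication"])  # True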

Patch

diff --git a/Documentation/ref/ovsdb-server.7.rst b/Documentation/ref/ovsdb-server.7.rst
index 04414350a..717f62d81 100644
--- a/Documentation/ref/ovsdb-server.7.rst
+++ b/Documentation/ref/ovsdb-server.7.rst
@@ -104,6 +104,13 @@  configuration and the following columns:
     The set of columns and column:key pairs for which authorized update and
     mutate operations should be permitted.
 
+Since version 2.16, a database table can be copied for replication purposes
+by setting the ``copyForReplication`` flag to ``true``.  For each table marked
+with this flag, ``ovsdb-server`` will create an additional table with the same
+name prefixed by ``_synced_`` (i.e., ``_synced_<table-name>``).  A server in a
+backup role will keep its own content in the original table and will put
+data received from the active server into this special table.
+
 4 Wire Protocol
 ---------------
 
diff --git a/ovsdb/ovsdb-doc b/ovsdb/ovsdb-doc
index 10d0c0c13..5513783c1 100755
--- a/ovsdb/ovsdb-doc
+++ b/ovsdb/ovsdb-doc
@@ -213,7 +213,8 @@  def docsToNroff(schemaFile, xmlFile, erFile, version=None):
             introNodes += [dbNode]
 
     documented_tables = set((name for (name, title) in summary))
-    schema_tables = set(schema.tables.keys())
+    schema_tables = set([name for name in schema.tables.keys()
+                         if not name.startswith("_")])
     undocumented_tables = schema_tables - documented_tables
     for table in undocumented_tables:
         raise error.Error("undocumented table %s" % table)
diff --git a/ovsdb/ovsdb.c b/ovsdb/ovsdb.c
index 9042658fa..f662a0238 100644
--- a/ovsdb/ovsdb.c
+++ b/ovsdb/ovsdb.c
@@ -238,6 +238,21 @@  ovsdb_schema_from_json(const struct json *json, struct ovsdb_schema **schemap)
         }
 
         shash_add(&schema->tables, table->name, table);
+
+        if (table->copy_for_replication) {
+            /* Need to create a copy of the table in case it will be
+             * synced from another server. */
+            struct ovsdb_table_schema *synced_table;
+
+            synced_table = ovsdb_table_schema_clone(table);
+            free(synced_table->name);
+            synced_table->name = xasprintf("_synced_%s", node->name);
+            /* Clearing 'copy' flag to avoid accidental further copying. */
+            synced_table->copy_for_replication = false;
+
+            shash_add(&schema->tables, synced_table->name, synced_table);
+        }
+
     }
 
     /* "isRoot" was not part of the original schema definition.  Before it was
@@ -308,8 +323,10 @@  ovsdb_schema_to_json(const struct ovsdb_schema *schema)
 
     SHASH_FOR_EACH (node, &schema->tables) {
         struct ovsdb_table_schema *table = node->data;
-        json_object_put(tables, table->name,
+        if (node->name[0] != '_') {
+            json_object_put(tables, table->name,
                         ovsdb_table_schema_to_json(table, default_is_root));
+        }
     }
     json_object_put(json, "tables", tables);
 
diff --git a/ovsdb/table.c b/ovsdb/table.c
index 6cd2d886d..b46946072 100644
--- a/ovsdb/table.c
+++ b/ovsdb/table.c
@@ -36,7 +36,8 @@  add_column(struct ovsdb_table_schema *ts, struct ovsdb_column *column)
 
 struct ovsdb_table_schema *
 ovsdb_table_schema_create(const char *name, bool mutable,
-                          unsigned int max_rows, bool is_root)
+                          unsigned int max_rows, bool is_root,
+                          bool copy_for_replication)
 {
     struct ovsdb_column *uuid, *version;
     struct ovsdb_table_schema *ts;
@@ -47,6 +48,7 @@  ovsdb_table_schema_create(const char *name, bool mutable,
     shash_init(&ts->columns);
     ts->max_rows = max_rows;
     ts->is_root = is_root;
+    ts->copy_for_replication = copy_for_replication;
 
     uuid = ovsdb_column_create("_uuid", false, true, &ovsdb_type_uuid);
     add_column(ts, uuid);
@@ -70,7 +72,8 @@  ovsdb_table_schema_clone(const struct ovsdb_table_schema *old)
     size_t i;
 
     new = ovsdb_table_schema_create(old->name, old->mutable,
-                                    old->max_rows, old->is_root);
+                                    old->max_rows, old->is_root,
+                                    old->copy_for_replication);
     SHASH_FOR_EACH (node, &old->columns) {
         const struct ovsdb_column *column = node->data;
 
@@ -126,7 +129,8 @@  ovsdb_table_schema_from_json(const struct json *json, const char *name,
                              struct ovsdb_table_schema **tsp)
 {
     struct ovsdb_table_schema *ts;
-    const struct json *columns, *mutable, *max_rows, *is_root, *indexes;
+    const struct json *columns, *mutable, *max_rows;
+    const struct json *is_root, *indexes, *copy_for_replication;
     struct shash_node *node;
     struct ovsdb_parser parser;
     struct ovsdb_error *error;
@@ -141,6 +145,8 @@  ovsdb_table_schema_from_json(const struct json *json, const char *name,
     max_rows = ovsdb_parser_member(&parser, "maxRows",
                                    OP_INTEGER | OP_OPTIONAL);
     is_root = ovsdb_parser_member(&parser, "isRoot", OP_BOOLEAN | OP_OPTIONAL);
+    copy_for_replication = ovsdb_parser_member(&parser, "copyForReplication",
+                                               OP_BOOLEAN | OP_OPTIONAL);
     indexes = ovsdb_parser_member(&parser, "indexes", OP_ARRAY | OP_OPTIONAL);
     error = ovsdb_parser_finish(&parser);
     if (error) {
@@ -165,7 +171,10 @@  ovsdb_table_schema_from_json(const struct json *json, const char *name,
     ts = ovsdb_table_schema_create(name,
                                    mutable ? json_boolean(mutable) : true,
                                    MIN(n_max_rows, UINT_MAX),
-                                   is_root ? json_boolean(is_root) : false);
+                                   is_root ? json_boolean(is_root) : false,
+                                   copy_for_replication
+                                   ? json_boolean(copy_for_replication)
+                                   : false);
     SHASH_FOR_EACH (node, json_object(columns)) {
         struct ovsdb_column *column;
 
@@ -249,6 +258,9 @@  ovsdb_table_schema_to_json(const struct ovsdb_table_schema *ts,
     if (default_is_root != ts->is_root) {
         json_object_put(json, "isRoot", json_boolean_create(ts->is_root));
     }
+    if (ts->copy_for_replication) {
+        json_object_put(json, "copyForReplication", json_boolean_create(true));
+    }
 
     columns = json_object_create();
 
diff --git a/ovsdb/table.h b/ovsdb/table.h
index 69dd649df..afd56f7a6 100644
--- a/ovsdb/table.h
+++ b/ovsdb/table.h
@@ -29,6 +29,7 @@  struct ovsdb_table_schema {
     char *name;
     bool mutable;
     bool is_root;               /* Part of garbage collection root set? */
+    bool copy_for_replication;  /* '_synced_' copy of the table needed? */
     unsigned int max_rows;      /* Maximum number of rows. */
     struct shash columns;       /* Contains "struct ovsdb_column *"s. */
     struct ovsdb_column_set *indexes;
@@ -36,7 +37,8 @@  struct ovsdb_table_schema {
 };
 
 struct ovsdb_table_schema *ovsdb_table_schema_create(
-    const char *name, bool mutable, unsigned int max_rows, bool is_root);
+    const char *name, bool mutable, unsigned int max_rows,
+    bool is_root, bool copy_for_replication);
 struct ovsdb_table_schema *ovsdb_table_schema_clone(
     const struct ovsdb_table_schema *);
 void ovsdb_table_schema_destroy(struct ovsdb_table_schema *);
diff --git a/python/ovs/db/schema.py b/python/ovs/db/schema.py
index 3ba844ae5..e58e81080 100644
--- a/python/ovs/db/schema.py
+++ b/python/ovs/db/schema.py
@@ -80,6 +80,13 @@  class DbSchema(object):
             _check_id(tableName, json)
             tables[tableName] = TableSchema.from_json(tableJson, tableName,
                                                       allow_extensions)
+            if tables[tableName].copy_for_replication:
+                synced_table_name = "_synced_" + tableName
+                synced_table = TableSchema.from_json(tableJson,
+                                                     synced_table_name,
+                                                     allow_extensions)
+                synced_table.copy_for_replication = False
+                tables[synced_table_name] = synced_table
 
         return DbSchema(name, version, tables)
 
@@ -92,7 +99,8 @@  class DbSchema(object):
 
         tables = {}
         for table in self.tables.values():
-            tables[table.name] = table.to_json(default_is_root)
+            if not table.name.startswith("_"):
+                tables[table.name] = table.to_json(default_is_root)
         json = {"name": self.name, "tables": tables}
         if self.version:
             json["version"] = self.version
@@ -172,7 +180,8 @@  def column_set_from_json(json, columns):
 
 class TableSchema(object):
     def __init__(self, name, columns, mutable=True, max_rows=sys.maxsize,
-                 is_root=True, indexes=[], extensions={}):
+                 is_root=True, indexes=[], extensions={},
+                 copy_for_replication=False):
         self.name = name
         self.columns = columns
         self.mutable = mutable
@@ -180,6 +189,7 @@  class TableSchema(object):
         self.is_root = is_root
         self.indexes = indexes
         self.extensions = extensions
+        self.copy_for_replication = copy_for_replication
 
     @staticmethod
     def from_json(json, name, allow_extensions=False):
@@ -188,6 +198,8 @@  class TableSchema(object):
         mutable = parser.get_optional("mutable", [bool], True)
         max_rows = parser.get_optional("maxRows", [int])
         is_root = parser.get_optional("isRoot", [bool], False)
+        copy_for_replication = parser.get_optional("copyForReplication",
+                                                   [bool], False)
         indexes_json = parser.get_optional("indexes", [list], [])
         if allow_extensions:
             extensions = parser.get_optional("extensions", [dict], {})
@@ -224,7 +236,7 @@  class TableSchema(object):
             indexes.append(index)
 
         return TableSchema(name, columns, mutable, max_rows, is_root, indexes,
-                           extensions)
+                           extensions, copy_for_replication)
 
     def to_json(self, default_is_root=False):
         """Returns this table schema serialized into JSON.
@@ -243,6 +255,8 @@  class TableSchema(object):
             json["mutable"] = False
         if default_is_root != self.is_root:
             json["isRoot"] = self.is_root
+        if self.copy_for_replication:
+            json["copyForReplication"] = True
 
         json["columns"] = columns = {}
         for column in self.columns.values():