@@ -19,7 +19,9 @@ ovn_controller_ovn_controller_SOURCES = \
ovn/controller/ovn-controller.c \
ovn/controller/ovn-controller.h \
ovn/controller/physical.c \
- ovn/controller/physical.h
+ ovn/controller/physical.h \
+ ovn/controller/filter.c \
+ ovn/controller/filter.h
ovn_controller_ovn_controller_LDADD = ovn/lib/libovn.la lib/libopenvswitch.la
man_MANS += ovn/controller/ovn-controller.8
EXTRA_DIST += ovn/controller/ovn-controller.8.xml
@@ -27,6 +27,8 @@
#include "openvswitch/vlog.h"
#include "ovn/lib/ovn-sb-idl.h"
#include "ovn-controller.h"
+#include "lport.h"
+#include "filter.h"
VLOG_DEFINE_THIS_MODULE(binding);
@@ -55,7 +57,9 @@ binding_register_ovs_idl(struct ovsdb_idl *ovs_idl)
static void
get_local_iface_ids(const struct ovsrec_bridge *br_int,
struct shash *lport_to_iface,
- struct sset *all_lports)
+ struct sset *all_lports,
+ struct lport_index *lports_index,
+ struct controller_ctx *ctx)
{
int i;
@@ -78,13 +82,17 @@ get_local_iface_ids(const struct ovsrec_bridge *br_int,
}
shash_add(lport_to_iface, iface_id, iface_rec);
sset_add(all_lports, iface_id);
+ if (!lport_lookup_by_name(lports_index, iface_id)) {
+ filter_lport(ctx, iface_id);
+ }
}
}
}
static void
add_local_datapath(struct hmap *local_datapaths,
- const struct sbrec_port_binding *binding_rec)
+ const struct sbrec_port_binding *binding_rec,
+ struct controller_ctx *ctx)
{
if (get_local_datapath(local_datapaths,
binding_rec->datapath->tunnel_key)) {
@@ -96,6 +104,7 @@ add_local_datapath(struct hmap *local_datapaths,
memcpy(&ld->uuid, &binding_rec->header_.uuid, sizeof ld->uuid);
hmap_insert(local_datapaths, &ld->hmap_node,
binding_rec->datapath->tunnel_key);
+ filter_datapath(ctx, binding_rec->datapath);
}
static void
@@ -127,7 +136,7 @@ consider_local_datapath(struct controller_ctx *ctx,
/* Add child logical port to the set of all local ports. */
sset_add(all_lports, binding_rec->logical_port);
}
- add_local_datapath(local_datapaths, binding_rec);
+ add_local_datapath(local_datapaths, binding_rec, ctx);
if (iface_rec && ctx->ovs_idl_txn) {
update_qos(iface_rec, binding_rec);
}
@@ -162,7 +171,7 @@ consider_local_datapath(struct controller_ctx *ctx,
}
sset_add(all_lports, binding_rec->logical_port);
- add_local_datapath(local_datapaths, binding_rec);
+ add_local_datapath(local_datapaths, binding_rec, ctx);
if (binding_rec->chassis == chassis_rec) {
return;
}
@@ -176,7 +185,7 @@ consider_local_datapath(struct controller_ctx *ctx,
const char *chassis = smap_get(&binding_rec->options,
"l3gateway-chassis");
if (!strcmp(chassis, chassis_rec->name) && ctx->ovnsb_idl_txn) {
- add_local_datapath(local_datapaths, binding_rec);
+ add_local_datapath(local_datapaths, binding_rec, ctx);
}
} else if (chassis_rec && binding_rec->chassis == chassis_rec) {
if (ctx->ovnsb_idl_txn) {
@@ -198,8 +207,8 @@ consider_local_datapath(struct controller_ctx *ctx,
void
binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
- const char *chassis_id, struct hmap *local_datapaths,
- struct sset *all_lports)
+ const char *chassis_id, struct lport_index *lports_index,
+ struct hmap *local_datapaths, struct sset *all_lports)
{
const struct sbrec_chassis *chassis_rec;
const struct sbrec_port_binding *binding_rec;
@@ -211,7 +220,8 @@ binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
}
if (br_int) {
- get_local_iface_ids(br_int, &lport_to_iface, all_lports);
+ get_local_iface_ids(br_int, &lport_to_iface, all_lports,
+ lports_index, ctx);
}
/* Run through each binding record to see if it is resident on this
@@ -25,11 +25,12 @@ struct ovsdb_idl;
struct ovsrec_bridge;
struct simap;
struct sset;
+struct lport_index;
void binding_register_ovs_idl(struct ovsdb_idl *);
void binding_run(struct controller_ctx *, const struct ovsrec_bridge *br_int,
- const char *chassis_id, struct hmap *local_datapaths,
- struct sset *all_lports);
+ const char *chassis_id, struct lport_index *lports_index,
+ struct hmap *local_datapaths, struct sset *all_lports);
bool binding_cleanup(struct controller_ctx *, const char *chassis_id);
#endif /* ovn/binding.h */
new file mode 100644
@@ -0,0 +1,239 @@
+/* Copyright (c) 2015, 2016 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+#include "filter.h"
+
+#include "openvswitch/vlog.h"
+#include "ovn/lib/ovn-sb-idl.h"
+#include "ovn-controller.h"
+#include "lport.h"
+
+VLOG_DEFINE_THIS_MODULE(filter);
+
+static struct hmap filtered_dps = HMAP_INITIALIZER(&filtered_dps);
+static struct hmap filtered_lps = HMAP_INITIALIZER(&filtered_lps);
+static bool skip_removal = false;
+
+struct filtered_dp {
+ struct hmap_node hmap_node;
+ int64_t tunnel_key;
+ struct uuid datapath;
+ bool used;
+};
+
+struct filtered_lp {
+ struct hmap_node hmap_node;
+ const char *lport_name;
+ bool used;
+};
+
+/* Initializes 'idl' so that, by default, no rows are replicated in tables
+ * that ovn-controller does not need fully replicated. */
+void
+filter_init(struct ovsdb_idl *idl)
+{
+ sbrec_port_binding_add_clause_false(idl);
+ sbrec_mac_binding_add_clause_false(idl);
+ sbrec_logical_flow_add_clause_false(idl);
+ sbrec_multicast_group_add_clause_false(idl);
+}
+
+/* Marks all replicated ports as "unused". */
+void
+filter_mark_unused(void)
+{
+ struct filtered_lp *lp;
+ struct filtered_dp *dp;
+
+ HMAP_FOR_EACH (lp, hmap_node, &filtered_lps) {
+ lp->used = false;
+ }
+ HMAP_FOR_EACH (dp, hmap_node, &filtered_dps) {
+ dp->used = false;
+ }
+ skip_removal = false;
+}
+
+/* Skip removal of unused elements in this round */
+void
+filter_skip_removal(void)
+{
+ skip_removal = true;
+}
+
+/* Clears the filter conditions, so that no rows are replicated. */
+void
+filter_clear(struct ovsdb_idl *idl)
+{
+ struct filtered_lp *lp, *next_lp;
+ struct filtered_dp *dp, *next_dp;
+
+ HMAP_FOR_EACH_SAFE (lp, next_lp, hmap_node, &filtered_lps) {
+ hmap_remove(&filtered_lps, &lp->hmap_node);
+ free((char *) lp->lport_name); free(lp);
+ }
+ HMAP_FOR_EACH_SAFE (dp, next_dp, hmap_node, &filtered_dps) {
+ hmap_remove(&filtered_dps, &dp->hmap_node);
+ free(dp);
+ }
+
+ ovsdb_idl_condition_reset(idl, &sbrec_table_port_binding);
+ ovsdb_idl_condition_reset(idl, &sbrec_table_logical_flow);
+ ovsdb_idl_condition_reset(idl, &sbrec_table_mac_binding);
+ ovsdb_idl_condition_reset(idl, &sbrec_table_multicast_group);
+
+ filter_init(idl);
+}
+
+static struct filtered_dp *
+lookup_dp_by_key(int64_t tunnel_key)
+{
+ struct filtered_dp *dp;
+
+ HMAP_FOR_EACH_WITH_HASH (dp, hmap_node, tunnel_key, &filtered_dps) {
+ if (dp->tunnel_key == tunnel_key) {
+ return dp;
+ }
+ }
+ return NULL;
+}
+
+/* Un-replicates logical ports or datapaths that have not been re-added via
+ * filter_lport() or filter_datapath() since the last call to
+ * filter_mark_unused(). */
+void
+filter_remove_unused_elements(struct controller_ctx *ctx,
+ const struct lport_index *lports_index)
+{
+ if (skip_removal) {
+ return;
+ }
+
+ struct filtered_lp *lp, *next;
+ struct filtered_dp *dp, *next_dp;
+
+ HMAP_FOR_EACH_SAFE (lp, next, hmap_node, &filtered_lps) {
+ if (!lp->used) {
+ const struct sbrec_port_binding *pb =
+ lport_lookup_by_name(lports_index, lp->lport_name);
+ if (!pb) {
+ continue;
+ }
+ if (lookup_dp_by_key(pb->datapath->tunnel_key)) {
+ VLOG_DBG("Unfilter Port %s", lp->lport_name);
+ sbrec_port_binding_remove_clause_logical_port(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ lp->lport_name);
+ hmap_remove(&filtered_lps, &lp->hmap_node);
+ free((char *) lp->lport_name); free(lp);
+ }
+ }
+ }
+ HMAP_FOR_EACH_SAFE (dp, next_dp, hmap_node, &filtered_dps) {
+ if (!dp->used) {
+ unfilter_datapath(ctx, dp->tunnel_key);
+ }
+ }
+}
+
+/* Adds 'lport_name' to the logical ports whose Port_Binding rows are
+ * replicated. */
+void
+filter_lport(struct controller_ctx *ctx, const char *lport_name)
+{
+ struct filtered_lp *lp;
+ size_t hash = hash_string(lport_name, 0);
+
+ HMAP_FOR_EACH_WITH_HASH(lp, hmap_node, hash, &filtered_lps) {
+ if (!strcmp(lp->lport_name, lport_name)) {
+ lp->used = true;
+ return;
+ }
+ }
+
+ VLOG_DBG("Filter Port %s", lport_name);
+ sbrec_port_binding_add_clause_logical_port(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ lport_name);
+
+ lp = xmalloc(sizeof *lp);
+ lp->lport_name = xstrdup(lport_name);
+ lp->used = true;
+ hmap_insert(&filtered_lps, &lp->hmap_node, hash);
+}
+
+/* Adds 'datapath' to the datapaths whose Port_Binding, Mac_Binding,
+ * Logical_Flow, and Multicast_Group rows are replicated. */
+void
+filter_datapath(struct controller_ctx *ctx,
+ const struct sbrec_datapath_binding *datapath)
+{
+ struct filtered_dp *dp;
+
+ dp = lookup_dp_by_key(datapath->tunnel_key);
+ if (dp) {
+ dp->used = true;
+ return;
+ }
+
+ dp = xmalloc(sizeof *dp);
+ dp->tunnel_key = datapath->tunnel_key;
+ dp->datapath = datapath->header_.uuid;
+ dp->used = true;
+ hmap_insert(&filtered_dps, &dp->hmap_node, datapath->tunnel_key);
+
+ VLOG_DBG("Filter DP "UUID_FMT, UUID_ARGS(&datapath->header_.uuid));
+ sbrec_port_binding_add_clause_datapath(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ &dp->datapath);
+ sbrec_mac_binding_add_clause_datapath(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ &dp->datapath);
+ sbrec_logical_flow_add_clause_logical_datapath(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ &dp->datapath);
+ sbrec_multicast_group_add_clause_datapath(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ &dp->datapath);
+
+}
+
+/* Removes 'datapath' from the datapaths whose Port_Binding, Mac_Binding,
+ * Logical_Flow, and Multicast_Group rows are replicated. */
+void
+unfilter_datapath(struct controller_ctx *ctx, int64_t tunnel_key)
+{
+ struct filtered_dp *dp = lookup_dp_by_key(tunnel_key);
+
+ if (dp) {
+ VLOG_DBG("Unfilter DP "UUID_FMT,
+ UUID_ARGS(&dp->datapath));
+ sbrec_port_binding_remove_clause_datapath(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ &dp->datapath);
+ sbrec_mac_binding_remove_clause_datapath(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ &dp->datapath);
+ sbrec_logical_flow_remove_clause_logical_datapath(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ &dp->datapath);
+ sbrec_multicast_group_remove_clause_datapath(ctx->ovnsb_idl,
+ OVSDB_F_EQ,
+ &dp->datapath);
+ hmap_remove(&filtered_dps, &dp->hmap_node);
+ free(dp);
+ }
+}
new file mode 100644
@@ -0,0 +1,47 @@
+/* Copyright (c) 2015, 2016 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OVN_FILTER_H
+#define OVN_FILTER_H 1
+
+/* Database client filtering
+ * -------------------------
+ *
+ * By default the OVSDB IDL replicates the entire contents of each table. For
+ * some tables, however, ovn-controller only needs some rows. For example, in
+ * the Logical_Flow table, it only needs the rows for logical datapaths that
+ * are in use directly or indirectly on this hypervisor. These functions aid
+ * in limiting the rows that the IDL replicates.
+ */
+
+#include <stdint.h>
+
+struct controller_ctx;
+struct ovsdb_idl;
+struct sbrec_datapath_binding;
+struct lport_index;
+
+void filter_init(struct ovsdb_idl *);
+void filter_clear(struct ovsdb_idl *);
+void filter_mark_unused(void);
+void filter_skip_removal(void);
+void filter_remove_unused_elements(struct controller_ctx *,
+ const struct lport_index *);
+void filter_lport(struct controller_ctx *, const char *lport_name);
+void filter_datapath(struct controller_ctx *,
+ const struct sbrec_datapath_binding *);
+void unfilter_datapath(struct controller_ctx *, int64_t tunnel_key);
+
+#endif /* ovn/controller/filter.h */
@@ -54,6 +54,7 @@
#include "stream.h"
#include "unixctl.h"
#include "util.h"
+#include "filter.h"
VLOG_DEFINE_THIS_MODULE(main);
@@ -473,6 +474,8 @@ main(int argc, char *argv[])
ovsdb_idl_create(ovnsb_remote, &sbrec_idl_class, true, true));
ovsdb_idl_omit_alert(ovnsb_idl_loop.idl, &sbrec_chassis_col_nb_cfg);
+ filter_init(ovnsb_idl_loop.idl);
+
/* Track the southbound idl. */
ovsdb_idl_track_add_all(ovnsb_idl_loop.idl);
@@ -497,6 +500,7 @@ main(int argc, char *argv[])
free(ovnsb_remote);
ovnsb_remote = new_ovnsb_remote;
ovsdb_idl_set_remote(ovnsb_idl_loop.idl, ovnsb_remote, true);
+ filter_clear(ovnsb_idl_loop.idl);
} else {
free(new_ovnsb_remote);
}
@@ -521,21 +525,24 @@ main(int argc, char *argv[])
const char *chassis_id = get_chassis_id(ctx.ovs_idl);
const struct sbrec_chassis *chassis = NULL;
+ static struct lport_index lports;
+ static struct mcgroup_index mcgroups;
+
+ lport_index_init(&lports, ctx.ovnsb_idl);
+ mcgroup_index_init(&mcgroups, ctx.ovnsb_idl);
+
+ filter_mark_unused();
+
if (chassis_id) {
chassis = chassis_run(&ctx, chassis_id, br_int);
encaps_run(&ctx, br_int, chassis_id);
- binding_run(&ctx, br_int, chassis_id, &local_datapaths,
+ binding_run(&ctx, br_int, chassis_id, &lports, &local_datapaths,
&all_lports);
}
if (br_int && chassis) {
patch_run(&ctx, br_int, chassis_id, &local_datapaths,
- &patched_datapaths);
-
- static struct lport_index lports;
- static struct mcgroup_index mcgroups;
- lport_index_init(&lports, ctx.ovnsb_idl);
- mcgroup_index_init(&mcgroups, ctx.ovnsb_idl);
+ &patched_datapaths, &lports);
enum mf_field_id mff_ovn_geneve = ofctrl_run(br_int,
&pending_ct_zones);
@@ -563,10 +570,15 @@ main(int argc, char *argv[])
sbrec_chassis_set_nb_cfg(chassis, cur_cfg);
}
}
- mcgroup_index_destroy(&mcgroups);
- lport_index_destroy(&lports);
+
+ /* Clean up unused elements only after both binding_run()
+ * and patch_run() have been called. */
+ filter_remove_unused_elements(&ctx, &lports);
}
+ mcgroup_index_destroy(&mcgroups);
+ lport_index_destroy(&lports);
+
sset_destroy(&all_lports);
struct local_datapath *cur_node, *next_node;
@@ -70,6 +70,7 @@ struct patched_datapath {
bool local; /* 'True' if the datapath is for gateway router. */
bool stale; /* 'True' if the datapath is not referenced by any patch
* port. */
+ int64_t tunnel_key;
};
struct patched_datapath *get_patched_datapath(const struct hmap *,
@@ -24,6 +24,7 @@
#include "openvswitch/hmap.h"
#include "openvswitch/vlog.h"
#include "ovn-controller.h"
+#include "filter.h"
VLOG_DEFINE_THIS_MODULE(patch);
@@ -252,7 +253,8 @@ add_bridge_mappings(struct controller_ctx *ctx,
static void
add_patched_datapath(struct hmap *patched_datapaths,
- const struct sbrec_port_binding *binding_rec, bool local)
+ const struct sbrec_port_binding *binding_rec, bool local,
+ struct controller_ctx *ctx)
{
struct patched_datapath *pd = get_patched_datapath(patched_datapaths,
binding_rec->datapath->tunnel_key);
@@ -266,9 +268,11 @@ add_patched_datapath(struct hmap *patched_datapaths,
pd = xzalloc(sizeof *pd);
pd->local = local;
pd->key = binding_rec->datapath->header_.uuid;
+ pd->tunnel_key = binding_rec->datapath->tunnel_key;
/* stale is set to false. */
hmap_insert(patched_datapaths, &pd->hmap_node,
binding_rec->datapath->tunnel_key);
+ filter_datapath(ctx, binding_rec->datapath);
}
static void
@@ -285,7 +289,9 @@ add_logical_patch_ports_preprocess(struct hmap *patched_datapaths)
/* This function should cleanup stale patched datapaths and any memory
* allocated for fields within a stale patched datapath. */
static void
-add_logical_patch_ports_postprocess(struct hmap *patched_datapaths)
+add_logical_patch_ports_postprocess(struct hmap *patched_datapaths,
+ struct hmap *local_datapaths,
+ struct controller_ctx *ctx)
{
/* Clean up stale patched datapaths. */
struct patched_datapath *pd_cur_node, *pd_next_node;
@@ -293,6 +299,9 @@ add_logical_patch_ports_postprocess(struct hmap *patched_datapaths)
patched_datapaths) {
if (pd_cur_node->stale == true) {
hmap_remove(patched_datapaths, &pd_cur_node->hmap_node);
+ if (!get_local_datapath(local_datapaths, pd_cur_node->tunnel_key)) {
+ unfilter_datapath(ctx, pd_cur_node->tunnel_key);
+ }
free(pd_cur_node);
}
}
@@ -325,7 +334,9 @@ add_logical_patch_ports(struct controller_ctx *ctx,
const struct ovsrec_bridge *br_int,
const char *local_chassis_id,
struct shash *existing_ports,
- struct hmap *patched_datapaths)
+ struct hmap *patched_datapaths,
+ struct hmap *local_datapaths,
+ struct lport_index *lports_index)
{
const struct sbrec_chassis *chassis_rec;
chassis_rec = get_chassis(ctx->ovnsb_idl, local_chassis_id);
@@ -362,23 +373,28 @@ add_logical_patch_ports(struct controller_ctx *ctx,
existing_ports);
free(dst_name);
free(src_name);
- add_patched_datapath(patched_datapaths, binding, local_port);
+ add_patched_datapath(patched_datapaths, binding, local_port, ctx);
if (local_port) {
if (binding->chassis != chassis_rec && ctx->ovnsb_idl_txn) {
sbrec_port_binding_set_chassis(binding, chassis_rec);
}
}
+
+ if (!lport_lookup_by_name(lports_index, peer)) {
+ filter_lport(ctx, peer);
+ }
}
}
- add_logical_patch_ports_postprocess(patched_datapaths);
+ add_logical_patch_ports_postprocess(patched_datapaths, local_datapaths, ctx);
}
void
patch_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
const char *chassis_id, struct hmap *local_datapaths,
- struct hmap *patched_datapaths)
+ struct hmap *patched_datapaths, struct lport_index *lports_index)
{
if (!ctx->ovs_idl_txn) {
+ filter_skip_removal();
return;
}
@@ -400,7 +416,7 @@ patch_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
add_bridge_mappings(ctx, br_int, &existing_ports, local_datapaths,
chassis_id);
add_logical_patch_ports(ctx, br_int, chassis_id, &existing_ports,
- patched_datapaths);
+ patched_datapaths, local_datapaths, lports_index);
/* Now 'existing_ports' only still contains patch ports that exist in the
* database but shouldn't. Delete them from the database. */
@@ -25,9 +25,11 @@
struct controller_ctx;
struct hmap;
struct ovsrec_bridge;
+struct lport_index;
void patch_run(struct controller_ctx *, const struct ovsrec_bridge *br_int,
const char *chassis_id, struct hmap *local_datapaths,
- struct hmap *patched_datapaths);
+ struct hmap *patched_datapaths,
+ struct lport_index *lports_index);
#endif /* ovn/patch.h */
@@ -92,11 +92,14 @@ AT_CHECK([ovn-sbctl \
-- --id=@dp2 create Datapath_Binding tunnel_key=2 \
-- create Port_Binding datapath=@dp1 logical_port=foo tunnel_key=1 type=patch options:peer=bar \
-- create Port_Binding datapath=@dp2 logical_port=bar tunnel_key=2 type=patch options:peer=foo \
+ -- create Port_Binding datapath=@dp1 logical_port=localvif3 tunnel_key=3 \
| ${PERL} $srcdir/uuidfilt.pl], [0], [<0>
<1>
<2>
<3>
+<4>
])
+ovs-vsctl add-port br-int localvif3 -- set Interface localvif3 external_ids:iface-id=localvif3
check_patches \
'br-int patch-br-int-to-localnet2 patch-localnet2-to-br-int' \
'br-eth0 patch-localnet2-to-br-int patch-br-int-to-localnet2' \
@@ -1525,6 +1525,10 @@ done
ovn_populate_arp
+# Allow some time for ovn-northd and ovn-controller to catch up.
+# XXX This should be more systematic.
+sleep 1
+
# XXX This is now the 3rd copy of these functions in this file ...
# Given the name of a logical port, prints the name of the hypervisor
@@ -1809,6 +1813,10 @@ ovn-nbctl lsp-set-addresses lp1 f0:00:00:00:00:01
ovn-nbctl lsp-add lsw0 lp2
ovn-nbctl lsp-set-addresses lp2 f0:00:00:00:00:02
+# Create dummy lport to make lsw0 visible on hv_gw
+ovn-nbctl lsp-add lsw0 lp-dummy
+ovn-nbctl lsp-set-addresses lp-dummy f0:00:00:00:00:ff
+
ovn-nbctl lsp-add lsw0 lp-gw
ovn-nbctl lsp-set-type lp-gw l2gateway
ovn-nbctl lsp-set-options lp-gw network_name=physnet1 l2gateway-chassis=hv_gw
@@ -1840,6 +1848,7 @@ ovn_attach n1 br-phys 192.168.0.3
ovs-vsctl add-br br-phys2
net_attach n2 br-phys2
ovs-vsctl set open . external_ids:ovn-bridge-mappings="physnet1:br-phys2"
+ovs-vsctl add-port br-int vif-dummy -- set Interface vif-dummy external-ids:iface-id=lp-dummy
# Add hv3 on the other side of the GW
sim_add hv3
@@ -5038,6 +5047,9 @@ ovn-nbctl lsp-add ls0 lrp0-rp -- set Logical_Switch_Port lrp0-rp \
type=router options:router-port=lrp0-rp addresses='"f0:00:00:00:00:01"'
# Add nat-address option
ovn-nbctl lsp-set-options lrp0-rp router-port=lrp0 nat-addresses="f0:00:00:00:00:01 192.168.0.2"
+# Create dummy lport to make ls0 visible on hv1
+ovn-nbctl lsp-add ls0 lp-dummy
+
net_add n1
sim_add hv1
@@ -5048,6 +5060,9 @@ ovs-vsctl \
ovn_attach n1 br-phys 192.168.0.1
+ovs-vsctl \
+ -- add-port br-int vif-dummy -- set Interface vif-dummy external-ids:iface-id=lp-dummy
+
AT_CHECK([ovs-vsctl set Open_vSwitch . external-ids:ovn-bridge-mappings=physnet1:br-eth0])
AT_CHECK([ovs-vsctl add-port br-eth0 snoopvif -- set Interface snoopvif options:tx_pcap=hv1/snoopvif-tx.pcap options:rxq_pcap=hv1/snoopvif-rx.pcap])
@@ -5316,10 +5331,6 @@ AT_CHECK([ovs-vsctl add-port br-int localvif1 -- set Interface localvif1 externa
# On hv1, check that there are no flows outputting bcast to tunnel
OVS_WAIT_UNTIL([test `ovs-ofctl dump-flows br-int table=32 | ofctl_strip | grep output | wc -l` -eq 0])
-# On hv2, check that there is 1 flow outputting bcast to tunnel to hv1.
-as hv2
-OVS_WAIT_UNTIL([test `ovs-ofctl dump-flows br-int table=32 | ofctl_strip | grep output | wc -l` -eq 1])
-
# Now bind vif2 on hv2.
AT_CHECK([ovs-vsctl add-port br-int localvif2 -- set Interface localvif2 external_ids:iface-id=localvif2])
Conditional monitoring of the Port_Binding, Logical_Flow, Multicast_Group, and MAC_Binding tables. As a result, ovn-controller will be notified only about records belonging to datapaths that are served by this hypervisor. Performance evaluation: OVN is the main candidate for conditional monitoring usage. It is clear that conditional monitoring reduces computation on the ovn-controller (client) side due to the reduced size of flow tables and update messages. Performance evaluation shows up to 75% computation reduction on the ovn-controller side. However, performance evaluation also shows a reduction in computation on the SB ovsdb-server side proportional to the degree to which each logical network is spread over physical hosts in the DC. Evaluation shows that in realistic scenarios there is a computation reduction on the server side as well. Evaluation on a simulated environment of 50 hosts and 1000 logical ports shows the following results (cycles #): LN spread over # hosts| master | patch | change ------------------------------------------------------------- 1 | 24597200127 | 24339235374 | 1.0% 6 | 23788521572 | 19145229352 | 19.5% 12 | 23886405758 | 17913143176 | 25.0% 18 | 25812686279 | 23675094540 | 8.2% 24 | 28414671499 | 24770202308 | 12.8% 30 | 31487218890 | 28397543436 | 9.8% 36 | 36116993930 | 34105388739 | 5.5% 42 | 37898342465 | 38647139083 | -1.9% 48 | 41637996229 | 41846616306 | -0.5% 50 | 41679995357 | 43455565977 | -4.2% Signed-off-by: Liran Schour <lirans@il.ibm.com> --- ovn/controller/automake.mk | 4 +- ovn/controller/binding.c | 26 +++-- ovn/controller/binding.h | 5 +- ovn/controller/filter.c | 239 ++++++++++++++++++++++++++++++++++++++++ ovn/controller/filter.h | 47 ++++++++ ovn/controller/ovn-controller.c | 30 +++-- ovn/controller/ovn-controller.h | 1 + ovn/controller/patch.c | 30 +++-- ovn/controller/patch.h | 4 +- tests/ovn-controller.at | 3 + tests/ovn.at | 19 +++- 11 files changed, 376 insertions(+), 32 deletions(-) create mode 100644 
ovn/controller/filter.c create mode 100644 ovn/controller/filter.h