@@ -740,6 +740,27 @@ consider_logical_flow__(const struct sbrec_logical_flow *lflow,
return true;
}
+ const char *io_port = smap_get(&lflow->tags, "in_out_port");
+ if (io_port) {
+ lflow_resource_add(l_ctx_out->lfrr, REF_TYPE_PORTBINDING, io_port,
+ &lflow->header_.uuid);
+ const struct sbrec_port_binding *pb
+ = lport_lookup_by_name(l_ctx_in->sbrec_port_binding_by_name,
+ io_port);
+ if (!pb) {
+ VLOG_DBG("lflow "UUID_FMT" matches inport/outport %s that's not "
+ "found, skip", UUID_ARGS(&lflow->header_.uuid), io_port);
+ return true;
+ }
+ char buf[16];
+ get_unique_lport_key(dp->tunnel_key, pb->tunnel_key, buf, sizeof buf);
+ if (!sset_contains(l_ctx_in->related_lport_ids, buf)) {
+ VLOG_DBG("lflow "UUID_FMT" matches inport/outport %s that's not "
+ "local, skip", UUID_ARGS(&lflow->header_.uuid), io_port);
+ return true;
+ }
+ }
+
/* Determine translation of logical table IDs to physical table IDs. */
bool ingress = !strcmp(lflow->pipeline, "ingress");
With the help of logical_flow's in_out_port tag, we can skip parsing a big portion of the logical flows in SB DB, which can largely improve ovn-controller's performance whenever a full recompute is required. With a scale test topology of 1000 chassis, 20 LSPs per chassis, 20k lports in total spread across 200 logical switches, connected by a logical router, the test results before & after this change: Before: - lflow-cache disabled: - ovn-controller recompute: 2.7 sec - lflow-cache enabled: - ovn-controller recompute: 2.1 sec - lflow cache memory: 622103 KB After: - lflow-cache disabled: - ovn-controller recompute: 0.83 sec - lflow-cache enabled: - ovn-controller recompute: 0.71 sec - lflow cache memory: 123641 KB (note: DP group enabled for both) So for this test scenario, when lflow cache is disabled, latency reduced ~70%; when lflow cache is enabled, latency reduced ~65% and lflow cache memory reduced ~80%. Signed-off-by: Han Zhou <hzhou@ovn.org> --- controller/lflow.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+)