@@ -67,17 +67,17 @@ struct uuid;
/* Start of LOG_PIPELINE_LEN tables. */
#define OFTABLE_LOG_INGRESS_PIPELINE 8
-#define OFTABLE_OUTPUT_LARGE_PKT_DETECT 40
-#define OFTABLE_OUTPUT_LARGE_PKT_PROCESS 41
-#define OFTABLE_REMOTE_OUTPUT 42
-#define OFTABLE_LOCAL_OUTPUT 43
-#define OFTABLE_CHECK_LOOPBACK 44
+#define OFTABLE_OUTPUT_LARGE_PKT_DETECT 41
+#define OFTABLE_OUTPUT_LARGE_PKT_PROCESS 42
+#define OFTABLE_REMOTE_OUTPUT 43
+#define OFTABLE_LOCAL_OUTPUT 44
+#define OFTABLE_CHECK_LOOPBACK 45
/* Start of the OUTPUT section of the pipeline. */
#define OFTABLE_OUTPUT_INIT OFTABLE_OUTPUT_LARGE_PKT_DETECT
/* Start of LOG_PIPELINE_LEN tables. */
-#define OFTABLE_LOG_EGRESS_PIPELINE 45
+#define OFTABLE_LOG_EGRESS_PIPELINE 46
#define OFTABLE_SAVE_INPORT 64
#define OFTABLE_LOG_TO_PHY 65
#define OFTABLE_MAC_BINDING 66
@@ -135,7 +135,8 @@ struct collector_set_ids;
OVNACT(CT_ORIG_IP6_DST, ovnact_result) \
OVNACT(CT_ORIG_TP_DST, ovnact_result) \
OVNACT(FLOOD_REMOTE, ovnact_null) \
- OVNACT(CT_STATE_SAVE, ovnact_result)
+ OVNACT(CT_STATE_SAVE, ovnact_result) \
+ OVNACT(MIRROR, ovnact_mirror) \
/* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
enum OVS_PACKED_ENUM ovnact_type {
@@ -538,6 +539,15 @@ struct ovnact_commit_lb_aff {
uint16_t timeout;
};
+/* OVNACT_MIRROR. */
+struct ovnact_mirror {
+ struct ovnact ovnact;
+
+ /* Argument. */
+ char *port; /* Mirror serving port for output
+ action. */
+};
+
#define OVN_FIELD_NOTE_MAGIC "ovn"
struct ovn_field_note_header {
@@ -5575,6 +5575,76 @@ format_CT_STATE_SAVE(const struct ovnact_result *res, struct ds *s)
ds_put_cstr(s, " = ct_state_save();");
}
+static void
+parse_MIRROR_action(struct action_context *ctx)
+{
+ if (!lexer_force_match(ctx->lexer, LEX_T_LPAREN)) {
+ return;
+ }
+
+ if (ctx->lexer->token.type != LEX_T_STRING) {
+ lexer_syntax_error(ctx->lexer, "expecting port name string");
+ return;
+ }
+
+ struct ovnact_mirror *mirror = ovnact_put_MIRROR(ctx->ovnacts);
+ mirror->port = xstrdup(ctx->lexer->token.s);
+
+ lexer_get(ctx->lexer);
+ lexer_force_match(ctx->lexer, LEX_T_RPAREN);
+}
+
+static void
+format_MIRROR(const struct ovnact_mirror *mirror,
+ struct ds *s)
+{
+ ds_put_cstr(s, "mirror(");
+ ds_put_format(s, "\"%s\"", mirror->port);
+ ds_put_cstr(s, ");");
+}
+
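+/* Sketch of the expected encoding (taken from the ovn.at action test),
+ * assuming the serving port's tunnel key is 0x11:
+ *
+ *   mirror("lsp1");
+ *     encodes as clone(set_field:ANY->in_port,set_field:0x11->reg15,
+ *                      resubmit(,OFTABLE_REMOTE_OUTPUT))
+ */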
+static void
+encode_MIRROR(const struct ovnact_mirror *mirror,
+ const struct ovnact_encode_params *ep,
+ struct ofpbuf *ofpacts)
+{
+ size_t clone_ofs = ofpacts->size;
+ uint32_t vport_key;
+
+ if (!ep->lookup_port(ep->aux, mirror->port, &vport_key)) {
+ return;
+ }
+
+ struct ofpact_nest *clone = ofpact_put_CLONE(ofpacts);
+
+ /* We need to set in_port to OFPP_NONE. Consider the following
+ * configuration:
+ * - hostA (target lport, source lport) + (dst lport) hostB
+ * - mirror in both directions (to and from lport) attached
+ * to the dst port.
+ *
+ * When a packet goes from the source lport to the dst lport, the cloned
+ * mirrored packet would have inport equal to outport, and incoming
+ * traffic would not be mirrored.
+ *
+ * Clearing in_port is safe: there will be no recirculations for packet
+ * processing on hostA, because we skip conntrack for traffic directed
+ * to the target port.
+ */
+ put_load(ofp_to_u16(OFPP_NONE), MFF_IN_PORT, 0, 16, ofpacts);
+ put_load(vport_key, MFF_LOG_OUTPORT, 0, 32, ofpacts);
+ emit_resubmit(ofpacts, OFTABLE_REMOTE_OUTPUT);
+ clone = ofpbuf_at_assert(ofpacts, clone_ofs, sizeof *clone);
+ ofpacts->header = clone;
+
+ ofpact_finish_CLONE(ofpacts, &clone);
+}
+
+static void
+ovnact_mirror_free(struct ovnact_mirror *mirror)
+{
+ free(mirror->port);
+}
+
/* Parses an assignment or exchange or put_dhcp_opts action. */
static void
parse_set_action(struct action_context *ctx)
@@ -5808,6 +5878,8 @@ parse_action(struct action_context *ctx)
ovnact_put_MAC_CACHE_USE(ctx->ovnacts);
} else if (lexer_match_id(ctx->lexer, "flood_remote")) {
ovnact_put_FLOOD_REMOTE(ctx->ovnacts);
+ } else if (lexer_match_id(ctx->lexer, "mirror")) {
+ parse_MIRROR_action(ctx);
} else {
lexer_syntax_error(ctx->lexer, "expecting action");
}
@@ -1413,3 +1413,10 @@ ovn_debug_commands_register(void)
unixctl_command_register("debug/enable-timewarp", "", 0, 0,
ovn_enable_timewarp, NULL);
}
+
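+/* Returns the name used for the mirror serving port of 'port_name' on
+ * datapath 'datapath_name', e.g. ("sw0", "sw0-target0") yields
+ * "mp-sw0-sw0-target0".  The caller must free the returned string. */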
+char *
+ovn_mirror_port_name(const char *datapath_name,
+ const char *port_name)
+{
+ return xasprintf("mp-%s-%s", datapath_name, port_name);
+}
@@ -199,6 +199,8 @@ get_sb_port_group_name(const char *nb_pg_name, int64_t dp_tunnel_key,
}
char *ovn_chassis_redirect_name(const char *port_name);
+char *ovn_mirror_port_name(const char *datapath_name,
+ const char *port_name);
void ovn_set_pidfile(const char *name);
bool ip46_parse_cidr(const char *str, struct in6_addr *prefix,
@@ -312,8 +314,8 @@ BUILD_ASSERT_DECL(
#define SCTP_ABORT_CHUNK_FLAG_T (1 << 0)
/* The number of tables for the ingress and egress pipelines. */
-#define LOG_PIPELINE_INGRESS_LEN 30
-#define LOG_PIPELINE_EGRESS_LEN 13
+#define LOG_PIPELINE_INGRESS_LEN 31
+#define LOG_PIPELINE_EGRESS_LEN 14
static inline uint32_t
hash_add_in6_addr(uint32_t hash, const struct in6_addr *addr)
@@ -61,6 +61,10 @@ northd_get_input_data(struct engine_node *node,
engine_ovsdb_node_get_index(
engine_get_input("SB_fdb", node),
"sbrec_fdb_by_dp_and_port");
+ input_data->nbrec_mirror_by_type_and_sink =
+ engine_ovsdb_node_get_index(
+ engine_get_input("NB_mirror", node),
+ "nbrec_mirror_by_type_and_sink");
input_data->nbrec_logical_switch_table =
EN_OVSDB_GET(engine_get_input("NB_logical_switch", node));
@@ -72,6 +76,8 @@ northd_get_input_data(struct engine_node *node,
EN_OVSDB_GET(engine_get_input("NB_chassis_template_var", node));
input_data->nbrec_mirror_table =
EN_OVSDB_GET(engine_get_input("NB_mirror", node));
+ input_data->nbrec_mirror_rule_table =
+ EN_OVSDB_GET(engine_get_input("NB_mirror_rule", node));
input_data->sbrec_datapath_binding_table =
EN_OVSDB_GET(engine_get_input("SB_datapath_binding", node));
@@ -64,6 +64,7 @@ static unixctl_cb_func chassis_features_list;
NB_NODE(acl, "acl") \
NB_NODE(logical_router, "logical_router") \
NB_NODE(mirror, "mirror") \
+ NB_NODE(mirror_rule, "mirror_rule") \
NB_NODE(meter, "meter") \
NB_NODE(bfd, "bfd") \
NB_NODE(static_mac_binding, "static_mac_binding") \
@@ -210,6 +211,7 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
engine_add_input(&en_acl_id, &en_sb_acl_id, NULL);
engine_add_input(&en_northd, &en_nb_mirror, NULL);
+ engine_add_input(&en_northd, &en_nb_mirror_rule, NULL);
engine_add_input(&en_northd, &en_nb_static_mac_binding, NULL);
engine_add_input(&en_northd, &en_nb_chassis_template_var, NULL);
@@ -492,6 +494,14 @@ void inc_proc_northd_init(struct ovsdb_idl_loop *nb,
engine_ovsdb_node_add_index(&en_sb_port_binding,
"sbrec_port_binding_by_name",
sbrec_port_binding_by_name);
+
+ struct ovsdb_idl_index *nbrec_mirror_by_type_and_sink
+ = ovsdb_idl_index_create2(nb->idl, &nbrec_mirror_col_type,
+ &nbrec_mirror_col_sink);
+ engine_ovsdb_node_add_index(&en_nb_mirror,
+ "nbrec_mirror_by_type_and_sink",
+ nbrec_mirror_by_type_and_sink);
+
struct ovsdb_idl_index *sbrec_ecmp_nexthop_by_ip_and_port
= ecmp_nexthop_index_create(sb->idl);
engine_ovsdb_node_add_index(&en_sb_ecmp_nexthop,
@@ -107,6 +107,11 @@ static bool vxlan_ic_mode;
* priority to determine the ACL's logical flow priority. */
#define OVN_ACL_PRI_OFFSET 1000
+/* Default logical flows for mirroring are added with the priority below.
+ * For mirror rules specified in the northbound database, this value is
+ * added to the rule's priority. */
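+/* For example, a mirror rule with priority 150 results in a logical flow
+ * of priority 100 + 150 = 250; the unconditional per-port mirror flows use
+ * priority 100. */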
+#define OVN_LPORT_MIRROR_OFFSET 100
+
/* Register definitions specific to switches. */
#define REGBIT_CONNTRACK_DEFRAG "reg0[0]"
#define REGBIT_CONNTRACK_COMMIT "reg0[1]"
@@ -459,6 +464,12 @@ od_has_lb_vip(const struct ovn_datapath *od)
}
}
+static const char *
+ovn_datapath_name(const struct sbrec_datapath_binding *sb)
+{
+ return smap_get_def(&sb->external_ids, "name", "");
+}
+
/* A group of logical router datapaths which are connected - either
* directly or indirectly.
* Each logical router can belong to only one group. */
@@ -1197,6 +1208,12 @@ is_transit_router_port(struct ovn_port *op)
smap_get_bool(&op->sb->chassis->other_config, "is-remote", false);
}
+static bool
+is_mp_port(const struct ovn_port *op)
+{
+ return op->mirror_target_port;
+}
+
void
destroy_routable_addresses(struct ovn_port_routable_addresses *ra)
{
@@ -1343,7 +1360,7 @@ ovn_port_destroy(struct hmap *ports, struct ovn_port *port)
/* Don't remove port->list. The node should be removed from such lists
* before calling this function. */
hmap_remove(ports, &port->key_node);
- if (port->od && !port->primary_port) {
+ if (port->od && !is_mp_port(port) && !port->primary_port) {
hmap_remove(&port->od->ports, &port->dp_node);
}
ovn_port_destroy_orphan(port);
@@ -1460,8 +1477,8 @@ lsp_is_type_changed(const struct sbrec_port_binding *sb,
if (!sb->type[0] && !nbsp->type[0]) {
/* Two "VIF's" interface make sure both have parent_port
- * set or both have parent_port unset, otherwisre they are
- * different ports type.
+ * or mirror_port set or both have parent_port/mirror_port
+ * unset, otherwise they are different port types.
*/
if ((!sb->parent_port && nbsp->parent_name) ||
(sb->parent_port && !nbsp->parent_name)) {
@@ -2183,6 +2200,39 @@ parse_lsp_addrs(struct ovn_port *op)
op->n_ps_addrs++;
}
}
+
+static void
+create_mirror_port(struct ovn_port *op, struct hmap *ports,
+ struct ovs_list *both_dbs, struct ovs_list *nb_only,
+ const struct nbrec_mirror *nb_mirror)
+{
+ char *mp_name = ovn_mirror_port_name(ovn_datapath_name(op->od->sb),
+ nb_mirror->sink);
+ struct ovn_port *mp = ovn_port_find(ports, mp_name);
+ struct ovn_port *target_port = ovn_port_find(ports, nb_mirror->sink);
+
+ if (!target_port) {
+ goto clear;
+ }
+
+ if (!mp) {
+ mp = ovn_port_create(ports, mp_name, op->nbsp, NULL, NULL);
+ ovs_list_push_back(nb_only, &mp->list);
+ } else if (mp->sb) {
+ ovn_port_set_nb(mp, op->nbsp, NULL);
+ ovs_list_remove(&mp->list);
+ ovs_list_push_back(both_dbs, &mp->list);
+ } else {
+ goto clear;
+ }
+
+ mp->mirror_target_port = target_port;
+ mp->od = op->od;
+
+clear:
+ free(mp_name);
+}
+
static struct ovn_port *
join_logical_ports_lsp(struct hmap *ports,
struct ovs_list *nb_only, struct ovs_list *both,
@@ -2190,7 +2240,8 @@ join_logical_ports_lsp(struct hmap *ports,
const struct nbrec_logical_switch_port *nbsp,
const char *name,
unsigned long *queue_id_bitmap,
- struct hmap *tag_alloc_table)
+ struct hmap *tag_alloc_table,
+ struct hmapx *mirror_attached_ports)
{
struct ovn_port *op = ovn_port_find_bound(ports, name);
if (op && (op->od || op->nbsp || op->nbrp)) {
@@ -2271,6 +2322,11 @@ join_logical_ports_lsp(struct hmap *ports,
}
hmap_insert(&od->ports, &op->dp_node,
hmap_node_hash(&op->key_node));
+
+ if (nbsp->n_mirror_rules) {
+ hmapx_add(mirror_attached_ports, op);
+ }
+
tag_alloc_add_existing_tags(tag_alloc_table, nbsp);
return op;
}
@@ -2408,6 +2464,22 @@ peer_needs_cr_port_creation(struct ovn_port *op)
return false;
}
+static void
+join_mirror_ports(struct ovn_port *op,
+ const struct nbrec_logical_switch_port *nbsp,
+ struct hmap *ports, struct ovs_list *both,
+ struct ovs_list *nb_only)
+{
+ /* Create mirror serving port bindings if there are any mirrors
+ * of type lport attached to this port. */
+ for (size_t j = 0; j < op->nbsp->n_mirror_rules; j++) {
+ struct nbrec_mirror *mirror = nbsp->mirror_rules[j];
+ if (!strcmp(mirror->type, "lport")) {
+ create_mirror_port(op, ports, both, nb_only, mirror);
+ }
+ }
+}
+
static void
join_logical_ports(const struct sbrec_port_binding_table *sbrec_pb_table,
struct hmap *ls_datapaths, struct hmap *lr_datapaths,
@@ -2428,6 +2500,8 @@ join_logical_ports(const struct sbrec_port_binding_table *sbrec_pb_table,
struct ovn_datapath *od;
struct hmapx dgps = HMAPX_INITIALIZER(&dgps);
+ struct hmapx mirror_attached_ports =
+ HMAPX_INITIALIZER(&mirror_attached_ports);
HMAP_FOR_EACH (od, key_node, lr_datapaths) {
ovs_assert(od->nbr);
for (size_t i = 0; i < od->nbr->n_ports; i++) {
@@ -2454,7 +2528,7 @@ join_logical_ports(const struct sbrec_port_binding_table *sbrec_pb_table,
= od->nbs->ports[i];
join_logical_ports_lsp(ports, nb_only, both, od, nbsp,
nbsp->name, queue_id_bitmap,
- tag_alloc_table);
+ tag_alloc_table, &mirror_attached_ports);
}
}
@@ -2622,6 +2696,14 @@ join_logical_ports(const struct sbrec_port_binding_table *sbrec_pb_table,
}
hmapx_destroy(&dgps);
+ HMAPX_FOR_EACH (hmapx_node, &mirror_attached_ports) {
+ op = hmapx_node->data;
+ if (op && op->nbsp) {
+ join_mirror_ports(op, op->nbsp, ports, both, nb_only);
+ }
+ }
+ hmapx_destroy(&mirror_attached_ports);
+
/* Wait until all ports have been connected to add to IPAM since
* it relies on proper peers to be set
*/
@@ -3300,6 +3382,16 @@ ovn_port_update_sbrec(struct ovsdb_idl_txn *ovnsb_txn,
sbrec_port_binding_set_external_ids(op->sb, &op->nbrp->external_ids);
} else {
+ if (op->mirror_target_port) {
+ /* For an lport mirror, establish a port binding that points to
+ * the mirror target port so it acts like a container port, but
+ * without tagging the traffic with a VLAN tag. */
+ sbrec_port_binding_set_type(op->sb, "mirror");
+ sbrec_port_binding_set_mirror_port(op->sb,
+ op->mirror_target_port->key);
+ goto common;
+ }
+
if (!lsp_is_router(op->nbsp)) {
uint32_t queue_id = smap_get_int(
&op->sb->options, "qdisc_queue_id", 0);
@@ -3499,6 +3591,8 @@ ovn_port_update_sbrec(struct ovsdb_idl_txn *ovnsb_txn,
}
}
+
+common:
if (op->tunnel_key != op->sb->tunnel_key) {
sbrec_port_binding_set_tunnel_key(op->sb, op->tunnel_key);
}
@@ -4799,6 +4893,38 @@ check_lsp_changes_other_than_up(const struct nbrec_logical_switch_port *nbsp)
return false;
}
+static bool
+is_lsp_mirror_target_port(
+ struct ovsdb_idl_index *nbrec_mirror_by_type_and_sink,
+ struct ovn_port *port)
+{
+ struct nbrec_mirror *target =
+ nbrec_mirror_index_init_row(nbrec_mirror_by_type_and_sink);
+ nbrec_mirror_index_set_type(target, "lport");
+ nbrec_mirror_index_set_sink(target, port->key);
+
+ const struct nbrec_mirror *nb_mirror =
+ nbrec_mirror_index_find(nbrec_mirror_by_type_and_sink, target);
+
+ nbrec_mirror_index_destroy_row(target);
+
+ if (nb_mirror) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+lsp_handle_mirror_rules_changes(const struct nbrec_logical_switch_port *nbsp)
+{
+ if (nbrec_logical_switch_port_is_updated(nbsp,
+ NBREC_LOGICAL_SWITCH_PORT_COL_MIRROR_RULES)) {
+ return false;
+ }
+ return true;
+}
+
/* Handles logical switch port changes of a changed logical switch.
* Returns false, if any logical port can't be incrementally handled.
*/
@@ -4869,6 +4995,12 @@ ls_handle_lsp_changes(struct ovsdb_idl_txn *ovnsb_idl_txn,
* by this change. Fallback to recompute. */
goto fail;
}
+ if (!lsp_handle_mirror_rules_changes(new_nbsp) ||
+ is_lsp_mirror_target_port(ni->nbrec_mirror_by_type_and_sink,
+ op)) {
+ /* Fallback to recompute. */
+ goto fail;
+ }
if (!check_lsp_is_up &&
!check_lsp_changes_other_than_up(new_nbsp)) {
/* If the only change is the "up" column while the
@@ -4917,6 +5049,12 @@ ls_handle_lsp_changes(struct ovsdb_idl_txn *ovnsb_idl_txn,
sbrec_port_binding_delete(op->sb);
delete_fdb_entries(ni->sbrec_fdb_by_dp_and_port, od->tunnel_key,
op->tunnel_key);
+ if (is_lsp_mirror_target_port(ni->nbrec_mirror_by_type_and_sink,
+ op)) {
+ /* This port was used as target mirror port, fallback
+ * to recompute. */
+ goto fail;
+ }
}
}
@@ -5678,6 +5816,145 @@ build_dhcpv6_action(struct ovn_port *op, struct in6_addr *offer_ip,
return true;
}
+enum mirror_filter {
+ IN_MIRROR,
+ OUT_MIRROR,
+ BOTH_MIRROR,
+};
+
+static void
+build_mirror_default_lflow(struct ovn_datapath *od,
+ struct lflow_table *lflows)
+{
+ ovn_lflow_add(lflows, od, S_SWITCH_IN_MIRROR, 0, "1", "next;", NULL);
+ ovn_lflow_add(lflows, od, S_SWITCH_OUT_MIRROR, 0, "1", "next;", NULL);
+}
+
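+/* Builds one per-rule mirror flow.  For example (names as in the northd
+ * tests), an ingress rule with priority 100, match "ip" and action "mirror"
+ * on port sw0-p1 with serving port mp-sw0-sw0-target0 produces:
+ *
+ *   table=ls_in_mirror, priority=200,
+ *       match=(inport == "sw0-p1" && (ip)),
+ *       action=(mirror("mp-sw0-sw0-target0"); next;)
+ */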
+static void
+build_mirror_lflow(struct ovn_port *op,
+ struct ovn_port *serving_port,
+ struct lflow_table *lflows,
+ struct nbrec_mirror_rule *rule, bool egress)
+{
+ struct ds match = DS_EMPTY_INITIALIZER;
+ struct ds action = DS_EMPTY_INITIALIZER;
+ enum ovn_stage stage;
+ const char *dir;
+ uint32_t priority = OVN_LPORT_MIRROR_OFFSET + rule->priority;
+
+ if (!strcmp(rule->action, "mirror")) {
+ ds_put_format(&action, "mirror(%s); ", serving_port->json_key);
+ }
+
+ if (egress) {
+ dir = "outport";
+ stage = S_SWITCH_OUT_MIRROR;
+ } else {
+ dir = "inport";
+ stage = S_SWITCH_IN_MIRROR;
+ }
+
+ ds_put_cstr(&action, "next;");
+ ds_put_format(&match, "%s == %s && (%s)", dir, op->json_key, rule->match);
+ ovn_lflow_add(lflows, op->od, stage, priority, ds_cstr(&match),
+ ds_cstr(&action), op->lflow_ref);
+
+ ds_destroy(&match);
+ ds_destroy(&action);
+}
+
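+/* Builds the unconditional per-port mirror flow at priority
+ * OVN_LPORT_MIRROR_OFFSET, plus a maximum-priority ls_out_pre_acl flow that
+ * skips conntrack for traffic directed to the mirror serving port.  Roughly
+ * (as in the northd tests):
+ *
+ *   table=ls_out_mirror, priority=100, match=(outport == "sw0-p1"),
+ *       action=(mirror("mp-sw0-sw0-target0"); next;)
+ *   table=ls_out_pre_acl, priority=65535,
+ *       match=(outport == "mp-sw0-sw0-target0"),
+ *       action=(next(pipeline=egress, table=<ls_out_apply_port_sec>);)
+ */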
+static void
+build_mirror_pass_lflow(struct ovn_port *op,
+ struct ovn_port *serving_port,
+ struct lflow_table *lflows, bool egress)
+{
+ struct ds match = DS_EMPTY_INITIALIZER;
+ struct ds action = DS_EMPTY_INITIALIZER;
+ enum ovn_stage stage;
+ const char *dir;
+
+ if (egress) {
+ dir = "outport";
+ stage = S_SWITCH_OUT_MIRROR;
+ } else {
+ dir = "inport";
+ stage = S_SWITCH_IN_MIRROR;
+ }
+
+ ds_put_format(&action, "mirror(%s); next;", serving_port->json_key);
+ ds_put_format(&match, "%s == %s", dir, op->json_key);
+ ovn_lflow_add(lflows, op->od, stage, OVN_LPORT_MIRROR_OFFSET,
+ ds_cstr(&match), ds_cstr(&action), op->lflow_ref);
+
+ ds_clear(&match);
+ ds_clear(&action);
+
+ /* We need to skip conntrack for all traffic directed to the target port. */
+ ds_put_format(&action, "next(pipeline=egress, table=%d);",
+ ovn_stage_get_table(S_SWITCH_OUT_APPLY_PORT_SEC));
+ ds_put_format(&match, "outport == %s", serving_port->json_key);
+
+ ovn_lflow_add(lflows, op->od, S_SWITCH_OUT_PRE_ACL, UINT16_MAX,
+ ds_cstr(&match), ds_cstr(&action), op->lflow_ref);
+
+ ds_destroy(&match);
+ ds_destroy(&action);
+}
+
+static void
+build_mirror_lflows(struct ovn_port *op,
+ const struct hmap *ls_ports,
+ struct lflow_table *lflows)
+{
+ enum mirror_filter filter;
+
+ for (size_t i = 0; i < op->nbsp->n_mirror_rules; i++) {
+ struct nbrec_mirror *mirror = op->nbsp->mirror_rules[i];
+
+ if (strcmp(mirror->type, "lport")) {
+ continue;
+ }
+
+ char *serving_port_name = ovn_mirror_port_name(
+ ovn_datapath_name(op->od->sb),
+ mirror->sink);
+
+ struct ovn_port *serving_port = ovn_port_find(ls_ports,
+ serving_port_name);
+
+ /* Mirror serving port wasn't created
+ * because the target port doesn't exist. */
+ if (!serving_port) {
+ free(serving_port_name);
+ continue;
+ }
+
+ filter = !strcmp(mirror->filter, "from-lport") ? IN_MIRROR :
+ !strcmp(mirror->filter, "to-lport") ? OUT_MIRROR
+ : BOTH_MIRROR;
+
+ if (filter == IN_MIRROR || filter == BOTH_MIRROR) {
+ build_mirror_pass_lflow(op, serving_port, lflows, false);
+ }
+ if (filter == OUT_MIRROR || filter == BOTH_MIRROR) {
+ build_mirror_pass_lflow(op, serving_port, lflows, true);
+ }
+
+ for (size_t j = 0; j < mirror->n_mirror_rules; j++) {
+ struct nbrec_mirror_rule *rule = mirror->mirror_rules[j];
+
+ if (filter == IN_MIRROR || filter == BOTH_MIRROR) {
+ build_mirror_lflow(op, serving_port, lflows, rule, false);
+ }
+ if (filter == OUT_MIRROR || filter == BOTH_MIRROR) {
+ build_mirror_lflow(op, serving_port, lflows, rule, true);
+ }
+ }
+
+ free(serving_port_name);
+ }
+}
+
/* Adds the logical flows in the (in/out) check port sec stage only if
* - the lport is disabled or
* - lport is of type vtep - to skip the ingress pipeline.
@@ -17477,6 +17754,7 @@ build_lswitch_and_lrouter_iterate_by_ls(struct ovn_datapath *od,
struct lswitch_flow_build_info *lsi)
{
ovs_assert(od->nbs);
+ build_mirror_default_lflow(od, lsi->lflows);
build_lswitch_lflows_pre_acl_and_acl(od, lsi->lflows,
lsi->meter_groups, NULL);
@@ -17553,7 +17831,11 @@ build_lswitch_and_lrouter_iterate_by_lsp(struct ovn_port *op,
{
ovs_assert(op->nbsp);
+ if (is_mp_port(op)) {
+ return;
+ }
/* Build Logical Switch Flows. */
+ build_mirror_lflows(op, ls_ports, lflows);
build_lswitch_port_sec_op(op, lflows, actions, match);
build_lswitch_learn_fdb_op(op, lflows, actions, match);
build_lswitch_arp_nd_responder_skip_local(op, lflows, match);
@@ -36,6 +36,7 @@ struct northd_input {
const struct nbrec_chassis_template_var_table
*nbrec_chassis_template_var_table;
const struct nbrec_mirror_table *nbrec_mirror_table;
+ const struct nbrec_mirror_rule_table *nbrec_mirror_rule_table;
/* Southbound table references */
const struct sbrec_datapath_binding_table *sbrec_datapath_binding_table;
@@ -73,6 +74,7 @@ struct northd_input {
struct ovsdb_idl_index *sbrec_ha_chassis_grp_by_name;
struct ovsdb_idl_index *sbrec_ip_mcast_by_dp;
struct ovsdb_idl_index *sbrec_fdb_by_dp_and_port;
+ struct ovsdb_idl_index *nbrec_mirror_by_type_and_sink;
};
/* A collection of datapaths. E.g. all logical switch datapaths, or all
@@ -472,37 +474,38 @@ enum ovn_stage {
/* Logical switch ingress stages. */ \
PIPELINE_STAGE(SWITCH, IN, CHECK_PORT_SEC, 0, "ls_in_check_port_sec") \
PIPELINE_STAGE(SWITCH, IN, APPLY_PORT_SEC, 1, "ls_in_apply_port_sec") \
- PIPELINE_STAGE(SWITCH, IN, LOOKUP_FDB , 2, "ls_in_lookup_fdb") \
- PIPELINE_STAGE(SWITCH, IN, PUT_FDB, 3, "ls_in_put_fdb") \
- PIPELINE_STAGE(SWITCH, IN, PRE_ACL, 4, "ls_in_pre_acl") \
- PIPELINE_STAGE(SWITCH, IN, PRE_LB, 5, "ls_in_pre_lb") \
- PIPELINE_STAGE(SWITCH, IN, PRE_STATEFUL, 6, "ls_in_pre_stateful") \
- PIPELINE_STAGE(SWITCH, IN, ACL_HINT, 7, "ls_in_acl_hint") \
- PIPELINE_STAGE(SWITCH, IN, ACL_EVAL, 8, "ls_in_acl_eval") \
- PIPELINE_STAGE(SWITCH, IN, ACL_SAMPLE, 9, "ls_in_acl_sample") \
- PIPELINE_STAGE(SWITCH, IN, ACL_ACTION, 10, "ls_in_acl_action") \
- PIPELINE_STAGE(SWITCH, IN, QOS, 11, "ls_in_qos") \
- PIPELINE_STAGE(SWITCH, IN, LB_AFF_CHECK, 12, "ls_in_lb_aff_check") \
- PIPELINE_STAGE(SWITCH, IN, LB, 13, "ls_in_lb") \
- PIPELINE_STAGE(SWITCH, IN, LB_AFF_LEARN, 14, "ls_in_lb_aff_learn") \
- PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 15, "ls_in_pre_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 16, "ls_in_nat_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 17, "ls_in_hairpin") \
- PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_EVAL, 18, \
- "ls_in_acl_after_lb_eval") \
- PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_SAMPLE, 19, \
+ PIPELINE_STAGE(SWITCH, IN, MIRROR, 2, "ls_in_mirror") \
+ PIPELINE_STAGE(SWITCH, IN, LOOKUP_FDB, 3, "ls_in_lookup_fdb") \
+ PIPELINE_STAGE(SWITCH, IN, PUT_FDB, 4, "ls_in_put_fdb") \
+ PIPELINE_STAGE(SWITCH, IN, PRE_ACL, 5, "ls_in_pre_acl") \
+ PIPELINE_STAGE(SWITCH, IN, PRE_LB, 6, "ls_in_pre_lb") \
+ PIPELINE_STAGE(SWITCH, IN, PRE_STATEFUL, 7, "ls_in_pre_stateful") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_HINT, 8, "ls_in_acl_hint") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_EVAL, 9, "ls_in_acl_eval") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_SAMPLE, 10, "ls_in_acl_sample") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_ACTION, 11, "ls_in_acl_action") \
+ PIPELINE_STAGE(SWITCH, IN, QOS, 12, "ls_in_qos") \
+ PIPELINE_STAGE(SWITCH, IN, LB_AFF_CHECK, 13, "ls_in_lb_aff_check") \
+ PIPELINE_STAGE(SWITCH, IN, LB, 14, "ls_in_lb") \
+ PIPELINE_STAGE(SWITCH, IN, LB_AFF_LEARN, 15, "ls_in_lb_aff_learn") \
+ PIPELINE_STAGE(SWITCH, IN, PRE_HAIRPIN, 16, "ls_in_pre_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, NAT_HAIRPIN, 17, "ls_in_nat_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, HAIRPIN, 18, "ls_in_hairpin") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_EVAL, 19, \
+ "ls_in_acl_after_lb_eval") \
+ PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_SAMPLE, 20, \
"ls_in_acl_after_lb_sample") \
- PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_ACTION, 20, \
+ PIPELINE_STAGE(SWITCH, IN, ACL_AFTER_LB_ACTION, 21, \
"ls_in_acl_after_lb_action") \
- PIPELINE_STAGE(SWITCH, IN, STATEFUL, 21, "ls_in_stateful") \
- PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 22, "ls_in_arp_rsp") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 23, "ls_in_dhcp_options") \
- PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 24, "ls_in_dhcp_response") \
- PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 25, "ls_in_dns_lookup") \
- PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 26, "ls_in_dns_response") \
- PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 27, "ls_in_external_port") \
- PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 28, "ls_in_l2_lkup") \
- PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 29, "ls_in_l2_unknown") \
+ PIPELINE_STAGE(SWITCH, IN, STATEFUL, 22, "ls_in_stateful") \
+ PIPELINE_STAGE(SWITCH, IN, ARP_ND_RSP, 23, "ls_in_arp_rsp") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_OPTIONS, 24, "ls_in_dhcp_options") \
+ PIPELINE_STAGE(SWITCH, IN, DHCP_RESPONSE, 25, "ls_in_dhcp_response") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_LOOKUP, 26, "ls_in_dns_lookup") \
+ PIPELINE_STAGE(SWITCH, IN, DNS_RESPONSE, 27, "ls_in_dns_response") \
+ PIPELINE_STAGE(SWITCH, IN, EXTERNAL_PORT, 28, "ls_in_external_port") \
+ PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 29, "ls_in_l2_lkup") \
+ PIPELINE_STAGE(SWITCH, IN, L2_UNKNOWN, 30, "ls_in_l2_unknown") \
\
/* Logical switch egress stages. */ \
PIPELINE_STAGE(SWITCH, OUT, LOOKUP_FDB, 0, "ls_out_lookup_fdb") \
@@ -514,10 +517,11 @@ enum ovn_stage {
PIPELINE_STAGE(SWITCH, OUT, ACL_EVAL, 6, "ls_out_acl_eval") \
PIPELINE_STAGE(SWITCH, OUT, ACL_SAMPLE, 7, "ls_out_acl_sample") \
PIPELINE_STAGE(SWITCH, OUT, ACL_ACTION, 8, "ls_out_acl_action") \
- PIPELINE_STAGE(SWITCH, OUT, QOS, 9, "ls_out_qos") \
- PIPELINE_STAGE(SWITCH, OUT, STATEFUL, 10, "ls_out_stateful") \
- PIPELINE_STAGE(SWITCH, OUT, CHECK_PORT_SEC, 11, "ls_out_check_port_sec") \
- PIPELINE_STAGE(SWITCH, OUT, APPLY_PORT_SEC, 12, "ls_out_apply_port_sec") \
+ PIPELINE_STAGE(SWITCH, OUT, MIRROR, 9, "ls_out_mirror") \
+ PIPELINE_STAGE(SWITCH, OUT, QOS, 10, "ls_out_qos") \
+ PIPELINE_STAGE(SWITCH, OUT, STATEFUL, 11, "ls_out_stateful") \
+ PIPELINE_STAGE(SWITCH, OUT, CHECK_PORT_SEC, 12, "ls_out_check_port_sec") \
+ PIPELINE_STAGE(SWITCH, OUT, APPLY_PORT_SEC, 13, "ls_out_apply_port_sec") \
\
/* Logical router ingress stages. */ \
PIPELINE_STAGE(ROUTER, IN, ADMISSION, 0, "lr_in_admission") \
@@ -688,6 +692,10 @@ struct ovn_port {
* to NULL. */
struct ovn_port *cr_port;
+ /* If this ovn_port is a mirror serving port, this field points to the
+ * mirror target port.  Otherwise NULL. */
+ struct ovn_port *mirror_target_port;
+
bool has_unknown; /* If the addresses have 'unknown' defined. */
/* The port's peer:
@@ -398,7 +398,35 @@
</li>
</ul>
- <h3>Ingress Table 2: Lookup MAC address learning table</h3>
+ <h3>Ingress Table 2: Mirror</h3>
+
+ <p>
+ The overlay (lport) mirror table contains the following
+ logical flows:
+ </p>
+
+ <ul>
+ <li>
+ For each logical switch port with an attached mirror, a logical flow
+ with a priority of 100 is added. This flow matches all packets
+ arriving on the attached port, clones them, and forwards the cloned
+ packets to the mirror target port.
+ </li>
+
+ <li>
+ A priority 0 flow is added which matches on all packets and applies
+ the action <code>next;</code>.
+ </li>
+
+ <li>
+ For each Mirror Rule of a Mirror attached to a logical switch port,
+ a logical flow is added that matches packets arriving on that port
+ against the rule's match. For rules with action <code>mirror</code>
+ the packet is cloned and the clone is sent to the mirror target port;
+ rules with action <code>skip</code> simply advance to the next table.
+ </li>
+ </ul>
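+
+ <p>
+ For example, a mirror with sink <code>sw0-target0</code> attached to
+ port <code>sw0-p1</code> on switch <code>sw0</code>, carrying a rule
+ with priority 100, match <code>ip</code> and action
+ <code>mirror</code>, results in a flow with priority 200 matching
+ <code>inport == "sw0-p1" &amp;&amp; (ip)</code> with action
+ <code>mirror("mp-sw0-sw0-target0"); next;</code>.
+ </p>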
+
+ <h3>Ingress Table 3: Lookup MAC address learning table</h3>
<p>
This table looks up the MAC learning table of the logical switch
@@ -450,7 +478,7 @@
</li>
</ul>
- <h3>Ingress Table 3: Learn MAC of 'unknown' ports.</h3>
+ <h3>Ingress Table 4: Learn MAC of 'unknown' ports.</h3>
<p>
This table learns the MAC addresses seen on the VIF or 'switch' logical
@@ -488,7 +516,7 @@
</li>
</ul>
- <h3>Ingress Table 4: <code>from-lport</code> Pre-ACLs</h3>
+ <h3>Ingress Table 5: <code>from-lport</code> Pre-ACLs</h3>
<p>
This table prepares flows for possible stateful ACL processing in
@@ -522,7 +550,7 @@
db="OVN_Northbound"/> table.
</p>
- <h3>Ingress Table 5: Pre-LB</h3>
+ <h3>Ingress Table 6: Pre-LB</h3>
<p>
This table prepares flows for possible stateful load balancing processing
@@ -598,7 +626,7 @@
logical router datapath to logical switch datapath.
</p>
- <h3>Ingress Table 6: Pre-stateful</h3>
+ <h3>Ingress Table 7: Pre-stateful</h3>
<p>
This table prepares flows for all possible stateful processing
@@ -632,7 +660,7 @@
</li>
</ul>
- <h3>Ingress Table 7: <code>from-lport</code> ACL hints</h3>
+ <h3>Ingress Table 8: <code>from-lport</code> ACL hints</h3>
<p>
This table consists of logical flows that set hints
@@ -717,7 +745,7 @@
</li>
</ul>
- <h3>Ingress table 8: <code>from-lport</code> ACL evaluation before LB</h3>
+ <h3>Ingress table 9: <code>from-lport</code> ACL evaluation before LB</h3>
<p>
Logical flows in this table closely reproduce those in the
@@ -896,7 +924,7 @@
</li>
</ul>
- <h3>Ingress Table 9: <code>from-lport</code> ACL sampling</h3>
+ <h3>Ingress Table 10: <code>from-lport</code> ACL sampling</h3>
<p>
Logical flows in this table sample traffic matched by
@@ -936,7 +964,7 @@
</li>
</ul>
- <h3>Ingress Table 10: <code>from-lport</code> ACL action</h3>
+ <h3>Ingress Table 11: <code>from-lport</code> ACL action</h3>
<p>
Logical flows in this table decide how to proceed based on the values of
@@ -976,7 +1004,7 @@
</li>
</ul>
- <h3>Ingress Table 11: <code>from-lport</code> QoS</h3>
+ <h3>Ingress Table 12: <code>from-lport</code> QoS</h3>
<p>
Logical flows in this table closely reproduce those in the
@@ -999,7 +1027,7 @@
</li>
</ul>
- <h3>Ingress Table 12: Load balancing affinity check</h3>
+ <h3>Ingress Table 13: Load balancing affinity check</h3>
<p>
Load balancing affinity check table contains the following
@@ -1027,7 +1055,7 @@
</li>
</ul>
- <h3>Ingress Table 13: LB</h3>
+ <h3>Ingress Table 14: LB</h3>
<ul>
<li>
@@ -1107,7 +1135,7 @@
</li>
</ul>
- <h3>Ingress Table 14: Load balancing affinity learn</h3>
+ <h3>Ingress Table 15: Load balancing affinity learn</h3>
<p>
Load balancing affinity learn table contains the following
@@ -1138,7 +1166,7 @@
</li>
</ul>
- <h3>Ingress Table 15: Pre-Hairpin</h3>
+ <h3>Ingress Table 16: Pre-Hairpin</h3>
<ul>
<li>
If the logical switch has load balancer(s) configured, then a
@@ -1156,7 +1184,7 @@
</li>
</ul>
- <h3>Ingress Table 16: Nat-Hairpin</h3>
+ <h3>Ingress Table 17: Nat-Hairpin</h3>
<ul>
<li>
If the logical switch has load balancer(s) configured, then a
@@ -1191,7 +1219,7 @@
</li>
</ul>
- <h3>Ingress Table 17: Hairpin</h3>
+ <h3>Ingress Table 18: Hairpin</h3>
<ul>
<li>
<p>
@@ -1229,7 +1257,7 @@
</li>
</ul>
- <h3>Ingress table 18: <code>from-lport</code> ACL evaluation after LB</h3>
+ <h3>Ingress table 19: <code>from-lport</code> ACL evaluation after LB</h3>
<p>
Logical flows in this table closely reproduce those in the
@@ -1314,7 +1342,7 @@
</li>
</ul>
- <h3>Ingress Table 19: <code>from-lport</code> ACL sampling after LB</h3>
+ <h3>Ingress Table 20: <code>from-lport</code> ACL sampling after LB</h3>
<p>
Logical flows in this table sample traffic matched by
@@ -1354,7 +1382,7 @@
</li>
</ul>
- <h3>Ingress Table 20: <code>from-lport</code> ACL action after LB</h3>
+ <h3>Ingress Table 21: <code>from-lport</code> ACL action after LB</h3>
<p>
Logical flows in this table decide how to proceed based on the values of
@@ -1394,7 +1422,7 @@
</li>
</ul>
- <h3>Ingress Table 21: Stateful</h3>
+ <h3>Ingress Table 22: Stateful</h3>
<ul>
<li>
@@ -1417,7 +1445,7 @@
</li>
</ul>
- <h3>Ingress Table 22: ARP/ND responder</h3>
+ <h3>Ingress Table 23: ARP/ND responder</h3>
<p>
This table implements ARP/ND responder in a logical switch for known
@@ -1752,7 +1780,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 23: DHCP option processing</h3>
+ <h3>Ingress Table 24: DHCP option processing</h3>
<p>
This table adds the DHCPv4 options to a DHCPv4 packet from the
@@ -1813,7 +1841,7 @@ next;
</li>
</ul>
- <h3>Ingress Table 24: DHCP responses</h3>
+ <h3>Ingress Table 25: DHCP responses</h3>
<p>
This table implements DHCP responder for the DHCP replies generated by
@@ -1894,7 +1922,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 25 DNS Lookup</h3>
+ <h3>Ingress Table 26 DNS Lookup</h3>
<p>
This table looks up and resolves the DNS names to the corresponding
@@ -1923,7 +1951,7 @@ reg0[4] = dns_lookup(); next;
</li>
</ul>
- <h3>Ingress Table 26 DNS Responses</h3>
+ <h3>Ingress Table 27 DNS Responses</h3>
<p>
This table implements DNS responder for the DNS replies generated by
@@ -1958,7 +1986,7 @@ output;
</li>
</ul>
- <h3>Ingress table 27 External ports</h3>
+ <h3>Ingress table 28 External ports</h3>
<p>
Traffic from the <code>external</code> logical ports enter the ingress
@@ -2001,7 +2029,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 28 Destination Lookup</h3>
+ <h3>Ingress Table 29 Destination Lookup</h3>
<p>
This table implements switching behavior. It contains these logical
@@ -2227,7 +2255,7 @@ output;
</li>
</ul>
- <h3>Ingress Table 29 Destination unknown</h3>
+ <h3>Ingress Table 30 Destination unknown</h3>
<p>
This table handles the packets whose destination was not found or
@@ -2463,21 +2491,49 @@ output;
This is similar to ingress table <code>ACL action</code>.
</p>
- <h3>Egress Table 9: <code>to-lport</code> QoS</h3>
+ <h3>Egress Table 9: Mirror</h3>
+
+ <p>
+ The overlay (lport) mirror table contains the following
+ logical flows:
+ </p>
+
+ <ul>
+ <li>
+ For each logical switch port with an attached mirror, a logical flow
+ with a priority of 100 is added. This flow matches all packets
+ directed to the attached port, clones them, and forwards the cloned
+ packets to the mirror target port.
+ </li>
+
+ <li>
+ A priority 0 flow is added which matches on all packets and applies
+ the action <code>next;</code>.
+ </li>
+
+ <li>
+ For each Mirror Rule of a Mirror attached to a logical switch port,
+ a logical flow is added that matches packets directed to that port
+ against the rule's match. For rules with action <code>mirror</code>
+ the packet is cloned and the clone is sent to the mirror target port;
+ rules with action <code>skip</code> simply advance to the next table.
+ </li>
+ </ul>
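+
+ <p>
+ For example, the same mirror and rule as in the ingress Mirror table
+ example result in a flow matching
+ <code>outport == "sw0-p1" &amp;&amp; (ip)</code> with action
+ <code>mirror("mp-sw0-sw0-target0"); next;</code>.
+ </p>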
+
+ <h3>Egress Table 10: <code>to-lport</code> QoS</h3>
<p>
This is similar to ingress table <code>QoS</code> except
they apply to <code>to-lport</code> QoS rules.
</p>
- <h3>Egress Table 10: Stateful</h3>
+ <h3>Egress Table 11: Stateful</h3>
<p>
This is similar to ingress table <code>Stateful</code> except that
there are no rules added for load balancing new connections.
</p>
- <h3>Egress Table 11: Egress Port Security - check</h3>
+ <h3>Egress Table 12: Egress Port Security - check</h3>
<p>
This is similar to the port security logic in table
@@ -2506,7 +2562,7 @@ output;
</li>
</ul>
- <h3>Egress Table 12: Egress Port Security - Apply</h3>
+ <h3>Egress Table 13: Egress Port Security - Apply</h3>
<p>
This is similar to the ingress port security logic in ingress table
@@ -1,7 +1,7 @@
{
"name": "OVN_Southbound",
- "version": "21.1.0",
- "cksum": "880279393 34779",
+ "version": "21.2.0",
+ "cksum": "29145795 34859",
"tables": {
"SB_Global": {
"columns": {
@@ -229,6 +229,7 @@
"minInteger": 1,
"maxInteger": 32767}}},
"parent_port": {"type": {"key": "string", "min": 0, "max": 1}},
+ "mirror_port": {"type": {"key": "string", "min": 0, "max": 1}},
"tag": {
"type": {"key": {"type": "integer",
"minInteger": 1,
@@ -2921,6 +2921,20 @@ tcp.flags = RST;
</p>
</dd>
+ <dt><code>mirror(<var>P</var>);</code></dt>
+ <dd>
+ <p>
+ <b>Parameters</b>: the name <var>P</var> of a logical port of
+ type <code>mirror</code> (the mirror serving port).
+ </p>
+
+ <p>
+ Used for lport mirrors: clones the packet and outputs the clone
+ through the mirror serving port <var>P</var>, which delivers it to
+ the mirror target port on the local or remote chassis.
+ </p>
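+
+ <p>
+ Example: <code>mirror("mp-sw0-sw0-target0"); next;</code> clones the
+ packet and resubmits the clone to the output stage with its logical
+ output port set to the serving port <code>mp-sw0-sw0-target0</code>.
+ </p>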
+ </dd>
+
</dl>
</column>
@@ -3467,6 +3481,10 @@ tcp.flags = RST;
</p>
</column>
+ <column name="mirror_port">
+ Points to the mirror target port for the lport mirror type.
+ </column>
+
<column name="type">
<p>
A type for this logical port. Logical ports can be used to model other
@@ -1468,11 +1468,11 @@ m4_define([OVN_CONTROLLER_EXIT],
m4_define([OFTABLE_PHY_TO_LOG], [0])
m4_define([OFTABLE_LOG_INGRESS_PIPELINE], [8])
-m4_define([OFTABLE_OUTPUT_LARGE_PKT_DETECT], [40])
-m4_define([OFTABLE_OUTPUT_LARGE_PKT_PROCESS], [41])
-m4_define([OFTABLE_REMOTE_OUTPUT], [42])
-m4_define([OFTABLE_LOCAL_OUTPUT], [43])
-m4_define([OFTABLE_LOG_EGRESS_PIPELINE], [45])
+m4_define([OFTABLE_OUTPUT_LARGE_PKT_DETECT], [41])
+m4_define([OFTABLE_OUTPUT_LARGE_PKT_PROCESS], [42])
+m4_define([OFTABLE_REMOTE_OUTPUT], [43])
+m4_define([OFTABLE_LOCAL_OUTPUT], [44])
+m4_define([OFTABLE_LOG_EGRESS_PIPELINE], [46])
m4_define([OFTABLE_SAVE_INPORT], [64])
m4_define([OFTABLE_LOG_TO_PHY], [65])
m4_define([OFTABLE_MAC_BINDING], [66])
@@ -16927,3 +16927,328 @@ AT_CHECK([ovn_strip_lflows < lrflows | grep priority=105 | grep -c "flags.force_
AT_CLEANUP
])
+
+OVN_FOR_EACH_NORTHD_NO_HV([
+AT_SETUP([Check NB mirror rules sync])
+AT_KEYWORDS([mirror rules])
+ovn_start
+
+check ovn-nbctl ls-add ls1
+check ovn-nbctl lsp-add ls1 lport4
+
+check ovn-nbctl mirror-add mirror1 lport to-lport lport4
+
+check_column mirror1 nb:Mirror name
+check_column lport nb:Mirror type
+check_column to-lport nb:Mirror filter
+check_column lport4 nb:Mirror sink
+
+check ovn-nbctl mirror-rule-add mirror1 100 'ip' skip
+
+check_column 100 nb:mirror_rule priority
+check_column ip nb:mirror_rule match
+check_column skip nb:mirror_rule action
+
+check ovn-nbctl set mirror_rule . priority=150
+check_column 150 nb:mirror_rule priority
+check_column ip nb:mirror_rule match
+check_column skip nb:mirror_rule action
+
+check ovn-nbctl set mirror_rule . match='icmp'
+check_column 150 nb:mirror_rule priority
+check_column icmp nb:mirror_rule match
+check_column skip nb:mirror_rule action
+
+check ovn-nbctl set mirror_rule . action=mirror
+check_column 150 nb:mirror_rule priority
+check_column icmp nb:mirror_rule match
+check_column mirror nb:mirror_rule action
+
+dnl Mirror rule attach mirror
+mirrorrule1uuid=$(fetch_column nb:mirror_rule _uuid priority=150)
+check_column "$mirrorrule1uuid" nb:Mirror mirror_rules mirror_rules="$mirrorrule1uuid"
+
+check ovn-nbctl mirror-rule-add mirror1 200 'ip' mirror
+mirrorrule2uuid=$(fetch_column nb:mirror_rule _uuid priority=200)
+check_column "$mirrorrule1uuid" nb:Mirror mirror_rules mirror_rules="$mirrorrule1uuid","$mirrorrule2uuid"
+
+check ovn-nbctl mirror-rule-del mirror1 200
+check_column "$mirrorrule1uuid" nb:Mirror mirror_rules mirror_rules="$mirrorrule1uuid"
+
+check ovn-nbctl mirror-rule-del mirror1
+check_column "$mirrorrule1uuid" nb:Mirror mirror_rules mirror_rules=""
+
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD_NO_HV([
+AT_SETUP([Mirror rule lflows])
+AT_KEYWORDS([mirror rules])
+ovn_start
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl ls-add sw1
+
+check ovn-nbctl lsp-add sw0 sw0-p1 -- lsp-set-addresses sw0-p1 "50:54:00:00:00:01 10.0.0.11"
+check ovn-nbctl lsp-add sw0 sw0-target0
+check ovn-nbctl lsp-add sw1 sw1-target1
+
+check ovn-nbctl mirror-add mirror0 lport to-lport sw0-target0
+check ovn-nbctl mirror-add mirror1 lport both sw0-target0
+check ovn-nbctl mirror-add mirror2 lport from-lport sw1-target1
+
+check ovn-nbctl mirror-rule-add mirror1 100 'ip' mirror
+check ovn-nbctl mirror-rule-add mirror1 150 'icmp' skip
+check ovn-nbctl mirror-rule-add mirror2 100 'ip4.dst == 192.168.0.1' mirror
+check ovn-nbctl mirror-rule-add mirror2 150 '1' skip
+
+check ovn-nbctl --wait=hv sync
+
+ovn-sbctl lflow-list sw0 > lflow-list
+
+AT_CHECK([grep -e "ls_in_mirror" -e "ls_out_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_mirror ), priority=0 , match=(1), action=(next;)
+])
+
+check ovn-nbctl lsp-attach-mirror sw0-p1 mirror0
+
+check ovn-nbctl --wait=sb sync
+
+check_column sw0-target0 Port_Binding mirror_port logical_port=mp-sw0-sw0-target0
+check_column mirror Port_Binding type logical_port=mp-sw0-sw0-target0
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" -e "ls_out_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_mirror ), priority=100 , match=(outport == "sw0-p1"), action=(mirror("mp-sw0-sw0-target0"); next;)
+])
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_out_pre_acl" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_out_pre_acl ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_pre_acl ), priority=110 , match=(eth.src == $svc_monitor_mac), action=(next;)
+ table=??(ls_out_pre_acl ), priority=65535, match=(outport == "mp-sw0-sw0-target0"), action=(next(pipeline=egress, table=??);)
+])
+
+check ovn-nbctl lsp-attach-mirror sw0-p1 mirror1
+
+check ovn-nbctl --wait=sb sync
+
+check_row_count Port_Binding 1 logical_port=mp-sw0-sw0-target0
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" -e "ls_out_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_in_mirror ), priority=100 , match=(inport == "sw0-p1"), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (ip)), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_in_mirror ), priority=250 , match=(inport == "sw0-p1" && (icmp)), action=(next;)
+ table=??(ls_out_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_mirror ), priority=100 , match=(outport == "sw0-p1"), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_out_mirror ), priority=200 , match=(outport == "sw0-p1" && (ip)), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_out_mirror ), priority=250 , match=(outport == "sw0-p1" && (icmp)), action=(next;)
+])
+
+check ovn-nbctl lsp-attach-mirror sw0-p1 mirror2
+
+check ovn-nbctl --wait=hv sync
+
+check_column sw1-target1 Port_Binding mirror_port logical_port=mp-sw0-sw1-target1
+check_column mirror Port_Binding type logical_port=mp-sw0-sw1-target1
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" -e "ls_out_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_in_mirror ), priority=100 , match=(inport == "sw0-p1"), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_in_mirror ), priority=100 , match=(inport == "sw0-p1"), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (ip)), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (ip4.dst == 192.168.0.1)), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_in_mirror ), priority=250 , match=(inport == "sw0-p1" && (1)), action=(next;)
+ table=??(ls_in_mirror ), priority=250 , match=(inport == "sw0-p1" && (icmp)), action=(next;)
+ table=??(ls_out_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_mirror ), priority=100 , match=(outport == "sw0-p1"), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_out_mirror ), priority=200 , match=(outport == "sw0-p1" && (ip)), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_out_mirror ), priority=250 , match=(outport == "sw0-p1" && (icmp)), action=(next;)
+])
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_out_pre_acl" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_out_pre_acl ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_pre_acl ), priority=110 , match=(eth.src == $svc_monitor_mac), action=(next;)
+ table=??(ls_out_pre_acl ), priority=65535, match=(outport == "mp-sw0-sw0-target0"), action=(next(pipeline=egress, table=??);)
+ table=??(ls_out_pre_acl ), priority=65535, match=(outport == "mp-sw0-sw1-target1"), action=(next(pipeline=egress, table=??);)
+])
+
+check ovn-nbctl lsp-del sw0-target0
+
+check ovn-nbctl --wait=hv sync
+
+check_row_count Port_Binding 0 logical_port=mp-sw0-sw0-target0
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" -e "ls_out_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_in_mirror ), priority=100 , match=(inport == "sw0-p1"), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (ip4.dst == 192.168.0.1)), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_in_mirror ), priority=250 , match=(inport == "sw0-p1" && (1)), action=(next;)
+ table=??(ls_out_mirror ), priority=0 , match=(1), action=(next;)
+])
+
+AT_CHECK([grep -e "ls_out_pre_acl" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_out_pre_acl ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_pre_acl ), priority=110 , match=(eth.src == $svc_monitor_mac), action=(next;)
+ table=??(ls_out_pre_acl ), priority=65535, match=(outport == "mp-sw0-sw1-target1"), action=(next(pipeline=egress, table=??);)
+])
+
+check ovn-nbctl lsp-add sw1 sw0-target0
+
+check ovn-nbctl --wait=hv sync
+
+check_column sw1-target1 Port_Binding mirror_port logical_port=mp-sw0-sw1-target1
+check_column mirror Port_Binding type logical_port=mp-sw0-sw1-target1
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" -e "ls_out_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_in_mirror ), priority=100 , match=(inport == "sw0-p1"), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_in_mirror ), priority=100 , match=(inport == "sw0-p1"), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (ip)), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (ip4.dst == 192.168.0.1)), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_in_mirror ), priority=250 , match=(inport == "sw0-p1" && (1)), action=(next;)
+ table=??(ls_in_mirror ), priority=250 , match=(inport == "sw0-p1" && (icmp)), action=(next;)
+ table=??(ls_out_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_mirror ), priority=100 , match=(outport == "sw0-p1"), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_out_mirror ), priority=200 , match=(outport == "sw0-p1" && (ip)), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_out_mirror ), priority=250 , match=(outport == "sw0-p1" && (icmp)), action=(next;)
+])
+
+mirror1_uuid=$(ovn-nbctl --bare --columns _uuid find Mirror name="mirror1")
+
+check ovn-nbctl set mirror $mirror1_uuid sink=sw1-target1
+
+check ovn-nbctl --wait=hv sync
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" -e "ls_out_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_in_mirror ), priority=100 , match=(inport == "sw0-p1"), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (ip)), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (ip4.dst == 192.168.0.1)), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_in_mirror ), priority=250 , match=(inport == "sw0-p1" && (1)), action=(next;)
+ table=??(ls_in_mirror ), priority=250 , match=(inport == "sw0-p1" && (icmp)), action=(next;)
+ table=??(ls_out_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_mirror ), priority=100 , match=(outport == "sw0-p1"), action=(mirror("mp-sw0-sw0-target0"); next;)
+ table=??(ls_out_mirror ), priority=100 , match=(outport == "sw0-p1"), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_out_mirror ), priority=200 , match=(outport == "sw0-p1" && (ip)), action=(mirror("mp-sw0-sw1-target1"); next;)
+ table=??(ls_out_mirror ), priority=250 , match=(outport == "sw0-p1" && (icmp)), action=(next;)
+])
+
+check_row_count Port_Binding 1 logical_port=mp-sw0-sw0-target0
+
+check ovn-nbctl mirror-del mirror0
+
+check_row_count Port_Binding 0 logical_port=mp-sw0-sw0-target0
+
+check ovn-nbctl mirror-del
+
+check ovn-nbctl --wait=sb sync
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" -e "ls_out_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_mirror ), priority=0 , match=(1), action=(next;)
+])
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_out_pre_acl" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_out_pre_acl ), priority=0 , match=(1), action=(next;)
+ table=??(ls_out_pre_acl ), priority=110 , match=(eth.src == $svc_monitor_mac), action=(next;)
+])
+
+AT_CLEANUP
+])
+
+OVN_FOR_EACH_NORTHD_NO_HV([
+AT_SETUP([ovn-detrace mirror rule check])
+AT_KEYWORDS([mirror rules])
+ovn_start
+
+check ovn-nbctl ls-add sw0
+check ovn-nbctl lsp-add sw0 sw0-p1
+check ovn-nbctl lsp-add sw0 sw0-rp
+
+check ovn-nbctl lsp-set-type sw0-rp router
+check ovn-nbctl lsp-set-addresses sw0-rp router
+check ovn-nbctl lsp-set-options sw0-rp router-port=lrp1
+
+check ovn-nbctl lsp-set-addresses sw0-p1 '00:00:00:00:00:01 1.1.1.1'
+
+check ovn-nbctl ls-add sw1
+check ovn-nbctl lsp-add sw1 sw1-target
+check ovn-nbctl lsp-add sw0 sw0-target
+
+check ovn-nbctl lr-add lr1
+check ovn-nbctl lrp-add lr1 lrp1 00:00:00:00:00:02 1.1.1.2/24
+
+check ovn-nbctl mirror-add mirror1 lport from-lport sw1-target
+check ovn-nbctl mirror-rule-add mirror1 100 '1' mirror
+
+check ovn-nbctl lsp-attach-mirror sw0-p1 mirror1
+
+check ovn-nbctl --wait=hv sync
+
+check_row_count Port_Binding 1 logical_port=mp-sw0-sw1-target
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_in_mirror ), priority=100 , match=(inport == "sw0-p1"), action=(mirror("mp-sw0-sw1-target"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (1)), action=(mirror("mp-sw0-sw1-target"); next;)
+])
+
+AT_CHECK([ovn_trace --minimal 'inport == "sw0-p1" && eth.src == 00:00:00:00:00:01 && eth.dst == 00:00:00:00:00:02 && ip4.dst == 1.1.1.2 && ip4.src == 1.1.1.1 && ip.ttl == 64' > trace], [0], [ignore])
+
+AT_CHECK([cat trace | head -n 3], [0], [dnl
+clone {
+ output("mp-sw0-sw1-target");
+};
+])
+
+check ovn-nbctl mirror-rule-add mirror1 300 'arp' mirror
+check ovn-nbctl mirror-rule-add mirror1 250 '1' skip
+
+check ovn-nbctl --wait=hv sync
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+ table=??(ls_in_mirror ), priority=100 , match=(inport == "sw0-p1"), action=(mirror("mp-sw0-sw1-target"); next;)
+ table=??(ls_in_mirror ), priority=200 , match=(inport == "sw0-p1" && (1)), action=(mirror("mp-sw0-sw1-target"); next;)
+ table=??(ls_in_mirror ), priority=350 , match=(inport == "sw0-p1" && (1)), action=(next;)
+ table=??(ls_in_mirror ), priority=400 , match=(inport == "sw0-p1" && (arp)), action=(mirror("mp-sw0-sw1-target"); next;)
+])
+
+AT_CHECK([ovn_trace --minimal 'inport == "sw0-p1" && eth.src == 00:00:00:00:00:01 && eth.dst == 00:00:00:00:00:02 && ip4.dst == 1.1.1.2 && ip4.src == 1.1.1.1 && ip.ttl == 64' > trace], [0], [ignore])
+
+AT_CHECK([cat trace | grep clone], [1], [dnl
+])
+
+check ovn-nbctl mirror-del mirror1
+
+check ovn-nbctl --wait=hv sync
+
+AT_CHECK([ovn_trace --minimal 'inport == "sw0-p1" && eth.src == 00:00:00:00:00:01 && eth.dst == 00:00:00:00:00:02 && ip4.dst == 1.1.1.2 && ip4.src == 1.1.1.1 && ip.ttl == 64' > trace], [0], [ignore])
+
+ovn-sbctl lflow-list sw0 > lflow-list
+AT_CHECK([grep -e "ls_in_mirror" lflow-list | ovn_strip_lflows], [0], [dnl
+ table=??(ls_in_mirror ), priority=0 , match=(1), action=(next;)
+])
+
+AT_CHECK([cat trace | grep output], [0], [dnl
+ output("sw0-p1");
+ output("sw0-p1");
+])
+
+AT_CLEANUP
+])
@@ -803,6 +803,7 @@ m4_define([NEXT], [m4_if(
m4_define([oflow_in_table], [NEXT(ingress, lflow_table)])
m4_define([oflow_out_table], [NEXT(egress, lflow_table)])
+m4_define([remote_out_table], [OFTABLE_REMOTE_OUTPUT])
AT_DATA([test-cases.txt], [
# drop
@@ -2309,6 +2310,13 @@ ct_state_save;
ct_state_save();
Syntax error at `ct_state_save' expecting action.
+#mirror
+mirror("lsp1");
+ encodes as clone(set_field:ANY->in_port,set_field:0x11->reg15,resubmit(,remote_out_table))
+
+mirror(lsp1);
+ Syntax error at `lsp1' expecting port name string.
+
# Miscellaneous negative tests.
;
Syntax error at `;'.
@@ -588,6 +588,20 @@ ovntrace_port_find_by_key(const struct ovntrace_datapath *dp,
return NULL;
}
+static const struct ovntrace_port *
+ovntrace_port_find_by_name(const struct ovntrace_datapath *dp,
+ const char *name)
+{
+ const struct shash_node *node;
+ SHASH_FOR_EACH (node, &ports) {
+ const struct ovntrace_port *port = node->data;
+ if (port->dp == dp && !strcmp(port->name, name)) {
+ return port;
+ }
+ }
+ return NULL;
+}
+
static const char *
ovntrace_port_key_to_name(const struct ovntrace_datapath *dp,
uint16_t key)
@@ -3138,6 +3152,29 @@ execute_ct_save_state(const struct ovnact_result *dl, struct flow *uflow,
ds_destroy(&s);
}
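+
+/* ovn-trace handling for the mirror() action: emits a "clone { ... }" node
+ * and traces the clone through the egress pipeline, e.g.:
+ *
+ *   clone {
+ *       output("mp-sw0-sw1-target");
+ *   };
+ */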
+static void
+execute_mirror(const struct ovnact_mirror *mirror,
+ const struct ovntrace_datapath *dp,
+ struct flow *uflow, struct ovs_list *super)
+{
+ const struct ovntrace_port *port;
+ struct flow cloned_flow = *uflow;
+ port = ovntrace_port_find_by_name(dp, mirror->port);
+
+ if (port) {
+ struct ovntrace_node *node = ovntrace_node_append(super,
+ OVNTRACE_NODE_TRANSFORMATION, "clone");
+
+ cloned_flow.regs[MFF_LOG_INPORT - MFF_REG0] = 0;
+ cloned_flow.regs[MFF_LOG_OUTPORT - MFF_REG0] = port->tunnel_key;
+
+ trace__(dp, &cloned_flow, 0, OVNACT_P_EGRESS, &node->subs);
+ } else {
+ ovntrace_node_append(super, OVNTRACE_NODE_ERROR,
+ "/* omitting output because no target port found. */");
+ }
+}
+
static void
trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
const struct ovntrace_datapath *dp, struct flow *uflow,
@@ -3458,6 +3495,11 @@ trace_actions(const struct ovnact *ovnacts, size_t ovnacts_len,
execute_check_out_port_sec(ovnact_get_CHECK_OUT_PORT_SEC(a),
dp, uflow);
break;
+
+ case OVNACT_MIRROR:
+ execute_mirror(ovnact_get_MIRROR(a), dp, uflow, super);
+ break;
+
case OVNACT_COMMIT_ECMP_NH:
break;
case OVNACT_CHK_ECMP_NH_MAC: