@@ -141,9 +141,10 @@ enum ovn_stage {
PIPELINE_STAGE(ROUTER, IN, ND_RA_OPTIONS, 5, "lr_in_nd_ra_options") \
PIPELINE_STAGE(ROUTER, IN, ND_RA_RESPONSE, 6, "lr_in_nd_ra_response") \
PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 7, "lr_in_ip_routing") \
- PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 8, "lr_in_arp_resolve") \
- PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 9, "lr_in_gw_redirect") \
- PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 10, "lr_in_arp_request") \
+ PIPELINE_STAGE(ROUTER, IN, POLICY, 8, "lr_in_policy") \
+ PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 9, "lr_in_arp_resolve") \
+ PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 10, "lr_in_gw_redirect") \
+ PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 11, "lr_in_arp_request") \
\
/* Logical router egress stages. */ \
PIPELINE_STAGE(ROUTER, OUT, UNDNAT, 0, "lr_out_undnat") \
@@ -4598,6 +4599,111 @@ find_lrp_member_ip(const struct ovn_port *op, const char *ip_s)
return NULL;
}
+/* Returns the router port of 'od' whose configured network contains
+ * 'nexthop', or NULL (with a rate-limited warning) when 'nexthop' is
+ * missing, is not a plain IPv4 address (an all-ones mask), or does not
+ * fall into any port's subnet.  'priority' is used only for logging. */
+static struct ovn_port*
+get_outport_for_routing_policy_nexthop(struct ovn_datapath *od,
+                                       struct hmap *ports,
+                                       int priority, const char *nexthop)
+{
+    if (nexthop == NULL) {
+        return NULL;
+    }
+
+    unsigned int plen = 0;
+    ovs_be32 nexthop_be32;
+    /* Verify that the next hop is an IP address with an all-ones mask. */
+    char *error = ip_parse_cidr(nexthop, &nexthop_be32, &plen);
+    if (!error) {
+        if (plen != 32) {
+            /* Parsing succeeded, so 'error' is NULL here and must not be
+             * formatted into the message. */
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(&rl, "bad next hop ip %s for routing policy "
+                         "with priority %d ",
+                         nexthop, priority);
+            return NULL;
+        }
+    } else {
+        /* Include the parser's diagnostic instead of discarding it. */
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+        VLOG_WARN_RL(&rl, "Failed to parse cidr %s for routing policy "
+                     "with priority %d: %s",
+                     nexthop, priority, error);
+        free(error);
+        return NULL;
+    }
+
+    /* Find the router port matching the next hop. */
+    int i;
+    struct ovn_port *out_port = NULL;
+    const char *lrp_addr_s = NULL;
+    for (i = 0; i < od->nbr->n_ports; i++) {
+        struct nbrec_logical_router_port *lrp = od->nbr->ports[i];
+        out_port = ovn_port_find(ports, lrp->name);
+        if (!out_port) {
+            /* This should not happen. */
+            continue;
+        }
+        lrp_addr_s = find_lrp_member_ip(out_port, nexthop);
+        if (lrp_addr_s) {
+            break;
+        }
+        out_port = NULL;
+    }
+    if (!out_port) {
+        /* There is no matched out port. */
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+        VLOG_WARN_RL(&rl, "No path for routing policy priority %d; "
+                     "next hop %s", priority, nexthop);
+        return NULL;
+    }
+    return out_port;
+}
+
+/* Translates one Northbound Logical_Router_Policy 'rule' of 'od' into a
+ * logical flow in the S_ROUTER_IN_POLICY stage.  "reroute" rules that name
+ * an unusable next hop are skipped with a warning; "drop" and "allow"
+ * become "drop;" and "next;" respectively. */
+static void
+build_routing_policy_flow(struct hmap *lflows, struct ovn_datapath *od,
+                          struct hmap *ports,
+                          const struct nbrec_logical_router_policy *rule)
+{
+    struct ds match = DS_EMPTY_INITIALIZER;
+    struct ds actions = DS_EMPTY_INITIALIZER;
+
+    if (!strcmp(rule->action, "reroute")) {
+        struct ovn_port *out_port = get_outport_for_routing_policy_nexthop(
+            od, ports, rule->priority, rule->nexthop);
+        if (!out_port) {
+            return;
+        }
+
+        const char *lrp_addr_s = find_lrp_member_ip(out_port, rule->nexthop);
+        if (!lrp_addr_s) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(&rl, "lrp_addr not found for routing policy "
+                         " priority %d nexthop %s",
+                         rule->priority, rule->nexthop);
+            return;
+        }
+
+        /* Load the next hop into reg0, the output port's own IP into reg1,
+         * rewrite the source MAC, and continue to ARP resolution. */
+        ds_put_format(&actions, "reg0 = %s; "
+                      "reg1 = %s; "
+                      "eth.src = %s; "
+                      "outport = %s; "
+                      "flags.loopback = 1; "
+                      "next;",
+                      rule->nexthop,
+                      lrp_addr_s,
+                      out_port->lrp_networks.ea_s,
+                      out_port->json_key);
+    } else if (!strcmp(rule->action, "drop")) {
+        ds_put_cstr(&actions, "drop;");
+    } else if (!strcmp(rule->action, "allow")) {
+        ds_put_cstr(&actions, "next;");
+    }
+
+    ds_put_cstr(&match, rule->match);
+    ovn_lflow_add(lflows, od, S_ROUTER_IN_POLICY, rule->priority,
+                  ds_cstr(&match), ds_cstr(&actions));
+    ds_destroy(&match);
+    ds_destroy(&actions);
+}
+
static void
add_route(struct hmap *lflows, const struct ovn_port *op,
const char *lrp_addr_s, const char *network_s, int plen,
@@ -6297,9 +6403,35 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
}
}
+ /* Logical router ingress table 8: Policy.
+ *
+ * A packet that arrives at this table is an IP packet that should be
+ * permitted/denied/rerouted to the address in the rule's nexthop.
+ * This table sets outport to the correct out_port,
+ * eth.src to the output port's MAC address,
+ * and '[xx]reg0' to the next-hop IP address (leaving
+ * 'ip[46].dst', the packet's final destination, unchanged), and
+ * advances to the next table for ARP/ND resolution. */
+ HMAP_FOR_EACH (od, key_node, datapaths) {
+ if (!od->nbr) {
+ continue;
+ }
+ /* This is a catch-all rule.  It has the lowest priority (0),
+ * matches every packet ("1"), and passes it through (next). */
+ ovn_lflow_add(lflows, od, S_ROUTER_IN_POLICY, 0, "1", "next;");
+
+ /* Convert routing policies to flows. */
+ for (int i = 0; i < od->nbr->n_policies; i++) {
+ const struct nbrec_logical_router_policy *rule;
+ rule = od->nbr->policies[i];
+ build_routing_policy_flow(lflows, od, ports, rule);
+ }
+ }
+
+
/* XXX destination unreachable */
- /* Local router ingress table 8: ARP Resolution.
+ /* Local router ingress table 9: ARP Resolution.
*
* Any packet that reaches this table is an IP packet whose next-hop IP
* address is in reg0. (ip4.dst is the final destination.) This table
@@ -6498,7 +6630,7 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
"get_nd(outport, xxreg0); next;");
}
- /* Logical router ingress table 9: Gateway redirect.
+ /* Logical router ingress table 10: Gateway redirect.
*
* For traffic with outport equal to the l3dgw_port
* on a distributed router, this table redirects a subset
@@ -6538,7 +6670,7 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
ovn_lflow_add(lflows, od, S_ROUTER_IN_GW_REDIRECT, 0, "1", "next;");
}
- /* Local router ingress table 10: ARP request.
+ /* Local router ingress table 11: ARP request.
*
* In the common case where the Ethernet destination has been resolved,
* this table outputs the packet (priority 0). Otherwise, it composes
@@ -1331,6 +1331,53 @@ IPv6 Routes
dnl ---------------------------------------------------------------------
+OVN_NBCTL_TEST([ovn_nbctl_policies], [policies], [
+AT_CHECK([ovn-nbctl lr-add lr0])
+
+dnl Add policies with allow and drop actions
+AT_CHECK([ovn-nbctl lr-policy-add lr0 100 "ip4.src == 1.1.1.0/24" drop])
+AT_CHECK([ovn-nbctl lr-policy-add lr0 100 "ip4.src == 1.1.2.0/24" allow])
+AT_CHECK([ovn-nbctl lr-policy-add lr0 101 "ip4.src == 2.1.1.0/24" allow])
+AT_CHECK([ovn-nbctl lr-policy-add lr0 101 "ip4.src == 2.1.2.0/24" drop])
+
+dnl Add duplicated policy
+AT_CHECK([ovn-nbctl lr-policy-add lr0 100 "ip4.src == 1.1.1.0/24" drop], [1], [],
+ [ovn-nbctl: Same routing policy already existed on the logical router lr0.
+])
+
+dnl Reject a policy with an invalid action keyword
+AT_CHECK([ovn-nbctl lr-policy-add lr0 103 "ip4.src == 1.1.1.0/24" deny], [1], [],
+ [ovn-nbctl: deny: action must be one of "allow", "drop", and "reroute"
+])
+
+dnl Delete by priority and match string
+AT_CHECK([ovn-nbctl lr-policy-del lr0 100 "ip4.src == 1.1.1.0/24"])
+AT_CHECK([ovn-nbctl lr-policy-list lr0], [0], [dnl
+Routing Policies
+ 101 ip4.src == 2.1.1.0/24 allow
+ 101 ip4.src == 2.1.2.0/24 drop
+ 100 ip4.src == 1.1.2.0/24 allow
+])
+
+dnl Delete all policies for given priority
+AT_CHECK([ovn-nbctl lr-policy-del lr0 101])
+AT_CHECK([ovn-nbctl lr-policy-list lr0], [0], [dnl
+Routing Policies
+ 100 ip4.src == 1.1.2.0/24 allow
+])
+
+dnl Add policy with reroute action
+AT_CHECK([ovn-nbctl lr-policy-add lr0 102 "ip4.src == 3.1.2.0/24" reroute 3.3.3.3])
+
+dnl Add policy with invalid reroute ip
+AT_CHECK([ovn-nbctl lr-policy-add lr0 103 "ip4.src == 3.1.2.0/24" reroute 3.3.3.x], [1], [],
+ [ovn-nbctl: bad next hop argument: 3.3.3.x
+])
+])
+
+dnl ---------------------------------------------------------------------
+
+
OVN_NBCTL_TEST([ovn_nbctl_lsp_types], [lsp types], [
AT_CHECK([ovn-nbctl ls-add ls0])
AT_CHECK([ovn-nbctl lsp-add ls0 lp0])