@@ -142,9 +142,10 @@ enum ovn_stage {
PIPELINE_STAGE(ROUTER, IN, ND_RA_OPTIONS, 5, "lr_in_nd_ra_options") \
PIPELINE_STAGE(ROUTER, IN, ND_RA_RESPONSE, 6, "lr_in_nd_ra_response") \
PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 7, "lr_in_ip_routing") \
- PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 8, "lr_in_arp_resolve") \
- PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 9, "lr_in_gw_redirect") \
- PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 10, "lr_in_arp_request") \
+ PIPELINE_STAGE(ROUTER, IN, POLICY, 8, "lr_in_policy") \
+ PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 9, "lr_in_arp_resolve") \
+ PIPELINE_STAGE(ROUTER, IN, GW_REDIRECT, 10, "lr_in_gw_redirect") \
+ PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 11, "lr_in_arp_request") \
\
/* Logical router egress stages. */ \
PIPELINE_STAGE(ROUTER, OUT, UNDNAT, 0, "lr_out_undnat") \
@@ -4633,6 +4634,111 @@ find_lrp_member_ip(const struct ovn_port *op, const char *ip_s)
return NULL;
}
+/* Returns the logical router port of 'od' that can reach 'nexthop', or
+ * NULL (after logging a rate-limited warning) when 'nexthop' is missing,
+ * is not a host (/32) address, or no connected port's network contains
+ * it.  'priority' is the routing policy's priority, used only in log
+ * messages.  'ports' maps port names to ovn_port structures. */
+static struct ovn_port *
+get_outport_for_routing_policy_nexthop(struct ovn_datapath *od,
+                                       struct hmap *ports,
+                                       int priority, const char *nexthop)
+{
+    if (nexthop == NULL) {
+        return NULL;
+    }
+
+    unsigned int plen = 0;
+    ovs_be32 nexthop_be32;
+    /* Verify that the next hop is an IP address with an all-ones mask. */
+    char *error = ip_parse_cidr(nexthop, &nexthop_be32, &plen);
+    if (error) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+        VLOG_WARN_RL(&rl, "Failed to parse cidr %s for routing policy "
+                     "with priority %d: %s", nexthop, priority, error);
+        free(error);
+        return NULL;
+    }
+    if (plen != 32) {
+        /* 'error' is always NULL here, so it must not be logged as %s. */
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+        VLOG_WARN_RL(&rl, "bad next hop ip %s for routing policy "
+                     "with priority %d", nexthop, priority);
+        return NULL;
+    }
+
+    /* Find the router port whose network contains the next hop. */
+    struct ovn_port *out_port = NULL;
+    for (int i = 0; i < od->nbr->n_ports; i++) {
+        struct nbrec_logical_router_port *lrp = od->nbr->ports[i];
+        out_port = ovn_port_find(ports, lrp->name);
+        if (!out_port) {
+            /* This should not happen. */
+            continue;
+        }
+        if (find_lrp_member_ip(out_port, nexthop)) {
+            break;
+        }
+        out_port = NULL;
+    }
+    if (!out_port) {
+        /* There is no matched out port. */
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+        VLOG_WARN_RL(&rl, "No path for routing policy priority %d; "
+                     "next hop %s", priority, nexthop);
+        return NULL;
+    }
+    return out_port;
+}
+
+/* Translates one northbound Logical_Router_Policy 'rule' on router 'od'
+ * into a single logical flow in the S_ROUTER_IN_POLICY stage: "allow" ->
+ * "next;", "drop" -> "drop;", "reroute" -> set reg0/reg1/eth.src/outport. */
+static void
+build_routing_policy_flow(struct hmap *lflows, struct ovn_datapath *od,
+                          struct hmap *ports,
+                          const struct nbrec_logical_router_policy *rule)
+{
+    struct ds match = DS_EMPTY_INITIALIZER;
+    struct ds actions = DS_EMPTY_INITIALIZER;
+
+    if (!strcmp(rule->action, "reroute")) {
+        struct ovn_port *out_port = get_outport_for_routing_policy_nexthop(
+            od, ports, rule->priority, rule->nexthop);
+        if (!out_port) {
+            /* The helper already logged a warning; install no flow. */
+            return;
+        }
+        const char *lrp_addr_s = find_lrp_member_ip(out_port, rule->nexthop);
+        if (!lrp_addr_s) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
+            VLOG_WARN_RL(&rl, "lrp_addr not found for routing policy "
+                         " priority %d nexthop %s",
+                         rule->priority, rule->nexthop);
+            return;
+        }
+        /* NOTE(review): flags.loopback presumably lets the rerouted packet
+         * leave via its ingress port -- confirm against action docs. */
+        ds_put_format(&actions, "reg0 = %s; "
+                      "reg1 = %s; "
+                      "eth.src = %s; "
+                      "outport = %s; "
+                      "flags.loopback = 1; "
+                      "next;",
+                      rule->nexthop, lrp_addr_s,
+                      out_port->lrp_networks.ea_s, out_port->json_key);
+    } else if (!strcmp(rule->action, "drop")) {
+        ds_put_cstr(&actions, "drop;");
+    } else if (!strcmp(rule->action, "allow")) {
+        ds_put_cstr(&actions, "next;");
+    }
+    ds_put_cstr(&match, rule->match);
+    ovn_lflow_add(lflows, od, S_ROUTER_IN_POLICY, rule->priority,
+                  ds_cstr(&match), ds_cstr(&actions));
+    ds_destroy(&match);
+    ds_destroy(&actions);
+}
+
static void
add_route(struct hmap *lflows, const struct ovn_port *op,
const char *lrp_addr_s, const char *network_s, int plen,
@@ -6332,9 +6438,35 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
}
}
+ /* Logical router ingress table 8: Policy.
+ *
+ * A packet that arrives at this table is an IP packet that should be
+ * permitted/denied/rerouted to the address in the rule's nexthop.
+ * This table sets outport to the correct out_port,
+ * eth.src to the output port's MAC address,
+ * and '[xx]reg0' to the next-hop IP address (leaving
+ * 'ip[46].dst', the packet's final destination, unchanged),
+ * then advances to the next table for ARP/ND resolution. */
+ HMAP_FOR_EACH (od, key_node, datapaths) {
+ if (!od->nbr) {
+ continue;
+ }
+        /* This is a catch-all rule: it has the lowest priority (0),
+         * matches everything ("1"), and passes packets through (next). */
+ ovn_lflow_add(lflows, od, S_ROUTER_IN_POLICY, 0, "1", "next;");
+
+ /* Convert routing policies to flows. */
+ for (int i = 0; i < od->nbr->n_policies; i++) {
+ const struct nbrec_logical_router_policy *rule;
+ rule = od->nbr->policies[i];
+ build_routing_policy_flow(lflows, od, ports, rule);
+ }
+ }
+
+
/* XXX destination unreachable */
- /* Local router ingress table 8: ARP Resolution.
+ /* Local router ingress table 9: ARP Resolution.
*
* Any packet that reaches this table is an IP packet whose next-hop IP
* address is in reg0. (ip4.dst is the final destination.) This table
@@ -6533,7 +6665,7 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
"get_nd(outport, xxreg0); next;");
}
- /* Logical router ingress table 9: Gateway redirect.
+ /* Logical router ingress table 10: Gateway redirect.
*
* For traffic with outport equal to the l3dgw_port
* on a distributed router, this table redirects a subset
@@ -6573,7 +6705,7 @@ build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
ovn_lflow_add(lflows, od, S_ROUTER_IN_GW_REDIRECT, 0, "1", "next;");
}
- /* Local router ingress table 10: ARP request.
+ /* Local router ingress table 11: ARP request.
*
* In the common case where the Ethernet destination has been resolved,
* this table outputs the packet (priority 0). Otherwise, it composes
Policy-based routing (PBR) provides a mechanism to configure permit/deny and reroute policies on the router. Permit/deny policies are similar to OVN ACLs, but exist on the logical-router. Reroute policies are needed for service-insertion and service-chaining. Currently, we support only stateless policies. To achieve this, a new table is introduced in the ingress pipeline of the Logical-router. The new table is between the ‘IP Routing’ and the ‘ARP/ND resolution’ table. This way, PBR can override routing decisions and provide a different next-hop. This Series: a. Changes in OVN NB Schema to introduce a new table in the Logical router. b. Add commands to ovn-nbctl to add/delete/list routing policies. c. Changes in ovn-northd to process routing-policy configurations. This Patch: ovn-northd changes to get routing-policies from northbound database and populate the same as logical flows in the southbound database. A new table called 'POLICY' is introduced in the Logical router's ingress pipeline. Each routing-policy configured in the northbound database translates into a single logical flow in the new table. The columns from the Logical_Router_Policy table are used as follows: The priority column is used as priority in the logical-flow. The match column is used as the 'match' string in the logical-flow. The action column is used to determine the action of the logical-flow. When the 'action' is reroute, if the nexthop ip-address is a connected router port or the IP address of a logical port, the logical-flow is constructed to route the packet to the nexthop ip-address. Signed-off-by: Mary Manohar <mary.manohar@nutanix.com> --- ovn/northd/ovn-northd.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 138 insertions(+), 6 deletions(-)