@@ -352,6 +352,25 @@ dump_flow_pattern(struct ds *s, const struct rte_flow_item *item)
ds_put_cstr(s, " Mask = null\n");
}
ds_put_cstr(s, "/ ");
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_GENEVE) {
+ const struct rte_flow_item_geneve *geneve_spec = item->spec;
+ const struct rte_flow_item_geneve *geneve_mask = item->mask;
+
+ ds_put_cstr(s, "rte flow geneve pattern:\n");
+ ds_put_cstr(s, "geneve ");
+ if (geneve_spec) {
+ ds_put_format(s, "vni spec %u ",
+ ntohl(*(ovs_be32 *)geneve_spec->vni) >> 8);
+ } else {
+ ds_put_cstr(s, " Spec = null\n");
+ }
+ if (geneve_mask) {
+ ds_put_format(s, "vni mask 0x%06x ",
+ ntohl(*(ovs_be32 *)geneve_mask->vni) >> 8);
+ } else {
+ ds_put_cstr(s, " Mask = null\n");
+ }
+ ds_put_cstr(s, "/ ");
} else {
ds_put_format(s, "unknown rte flow pattern (%d)\n", item->type);
}
@@ -422,6 +441,19 @@ dump_flow_action(struct ds *s, const struct rte_flow_action *actions)
}
} else if (actions->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
ds_put_format(s, "vxlan-decap: nop\n");
+ /* TBD: enable when DPDK provides RTE_FLOW_ACTION_TYPE_GENEVE_ENCAP/DECAP.
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_GENEVE_ENCAP) {
+ const struct rte_flow_action_geneve_encap *geneve_encap = actions->conf;
+ const struct rte_flow_item *items = geneve_encap->definition;
+
+ ds_put_cstr(s, "rte flow geneve-encap action:\n");
+ ds_put_cstr(s, "geneve_encap / ");
+ while (items && items->type != RTE_FLOW_ITEM_TYPE_END) {
+ dump_flow_pattern(s, items++);
+ }
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_GENEVE_DECAP) {
+ ds_put_format(s, "geneve-decap: nop\n");
+ */
} else if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) {
const struct rte_flow_action_of_push_vlan *vlan_tci = actions->conf;
@@ -670,6 +702,38 @@ parse_vxlan_match(struct flow_patterns *patterns,
}
static int
+parse_geneve_match(struct flow_patterns *patterns,
+ const struct match *match)
+{
+ struct rte_flow_item_geneve *geneve_spec, *geneve_mask;
+
+ if (is_all_zeros(&match->wc.masks.tunnel, sizeof match->wc.masks.tunnel)) {
+ return -1;
+ }
+ VLOG_DBG("parsing geneve tunnel match");
+
+ /* No need to transmit layer2/layer3/layer4 matches: for the
+ * outer header the FPGA only supports matching on the VNI. */
+ /*
+ ret = parse_tnl_ip_match(patterns, match, IPPROTO_UDP);
+ parse_tnl_udp_match(patterns, match);
+ */
+
+ /* GENEVE VNI: the be32 store covers vni[3] plus the adjacent rsvd1
+ * byte, which it leaves zero (both structs are xzalloc'd). */
+ geneve_spec = xzalloc(sizeof *geneve_spec);
+ geneve_mask = xzalloc(sizeof *geneve_mask);
+
+ put_unaligned_be32((ovs_be32 *)geneve_spec->vni,
+ htonl(ntohll(match->flow.tunnel.tun_id) << 8));
+ put_unaligned_be32((ovs_be32 *)geneve_mask->vni,
+ htonl(ntohll(match->wc.masks.tunnel.tun_id) << 8));
+
+ add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_GENEVE, geneve_spec, geneve_mask);
+ /* TBD: add option hdr */
+ return 0;
+}
+
+static int
parse_flow_match(struct flow_patterns *patterns,
struct match *match,
struct netdev *netdev,
@@ -689,6 +753,9 @@ parse_flow_match(struct flow_patterns *patterns,
if (!strcmp(netdev_get_type(netdev), "vxlan") &&
!parse_vxlan_match(patterns, match)) {
memset(&consumed_masks->tunnel, 0, sizeof consumed_masks->tunnel);
+ } else if (!strcmp(netdev_get_type(netdev), "geneve") &&
+ !parse_geneve_match(patterns, match)) {
+ memset(&consumed_masks->tunnel, 0, sizeof consumed_masks->tunnel);
} else if (netdev_vport_is_vport_class(netdev->netdev_class)) {
VLOG_DBG("in port (%s) not supported", netdev->name);
return -1;
@@ -1232,6 +1299,96 @@ err:
return -1;
}
+#define ACTION_GENEVE_ENCAP_ITEMS_NUM 5
+
+static int
+add_geneve_encap_action(struct flow_actions *actions,
+ const void *header)
+{
+ const struct eth_header *eth;
+ const struct udp_header *udp;
+ struct geneve_data {
+ /* TBD
+ struct rte_flow_action_geneve_encap conf;
+ */
+ struct rte_flow_action_vxlan_encap conf;
+ struct rte_flow_item items[];
+ } *geneve_data;
+ BUILD_ASSERT_DECL(offsetof(struct geneve_data, conf) == 0);
+ const void *geneve;
+ const void *l3;
+ const void *l4;
+ int field;
+
+ VLOG_DBG("adding geneve encap action");
+ geneve_data = xzalloc(sizeof *geneve_data +
+ sizeof(struct rte_flow_item) *
+ ACTION_GENEVE_ENCAP_ITEMS_NUM);
+ field = 0;
+
+ eth = header;
+ /* Ethernet */
+ geneve_data->items[field].type = RTE_FLOW_ITEM_TYPE_ETH;
+ geneve_data->items[field].spec = eth;
+ geneve_data->items[field].mask = &rte_flow_item_eth_mask;
+ field++;
+
+ l3 = eth + 1;
+ /* IP */
+ if (eth->eth_type == htons(ETH_TYPE_IP)) {
+ /* IPv4 */
+ const struct ip_header *ip = l3;
+
+ geneve_data->items[field].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ geneve_data->items[field].spec = ip;
+ geneve_data->items[field].mask = &rte_flow_item_ipv4_mask;
+
+ if (ip->ip_proto != IPPROTO_UDP) {
+ goto err;
+ }
+ l4 = (ip + 1);
+ } else if (eth->eth_type == htons(ETH_TYPE_IPV6)) {
+ const struct ovs_16aligned_ip6_hdr *ip6 = l3;
+
+ geneve_data->items[field].type = RTE_FLOW_ITEM_TYPE_IPV6;
+ geneve_data->items[field].spec = ip6;
+ geneve_data->items[field].mask = &rte_flow_item_ipv6_mask;
+
+ if (ip6->ip6_nxt != IPPROTO_UDP) {
+ goto err;
+ }
+ l4 = (ip6 + 1);
+ } else {
+ goto err;
+ }
+ field++;
+
+ udp = (const struct udp_header *)l4;
+ geneve_data->items[field].type = RTE_FLOW_ITEM_TYPE_UDP;
+ geneve_data->items[field].spec = udp;
+ geneve_data->items[field].mask = &rte_flow_item_udp_mask;
+ field++;
+
+ geneve = (udp + 1);
+ geneve_data->items[field].type = RTE_FLOW_ITEM_TYPE_GENEVE;
+ geneve_data->items[field].spec = geneve;
+ geneve_data->items[field].mask = &rte_flow_item_geneve_mask;
+ field++;
+
+ geneve_data->items[field].type = RTE_FLOW_ITEM_TYPE_END;
+
+ geneve_data->conf.definition = geneve_data->items;
+
+ /* TBD: DPDK has no RTE_FLOW_ACTION_TYPE_GENEVE_ENCAP yet; fail instead
+ * of reporting success, which would leak geneve_data and silently skip
+ * the encap in hardware. Once available, do:
+ * add_flow_action(actions, RTE_FLOW_ACTION_TYPE_GENEVE_ENCAP, geneve_data); */
+ goto err;
+err:
+ free(geneve_data);
+ return -1;
+}
+
+
static void
add_vxlan_decap_action(struct flow_actions *actions)
{
@@ -1239,6 +1396,16 @@ add_vxlan_decap_action(struct flow_actions *actions)
add_flow_action(actions, RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, NULL);
}
+static void
+add_geneve_decap_action(struct flow_actions *actions)
+{
+ VLOG_DBG("TIMO DBG: in add_geneve_decap_action");
+ /* TBD need change to geneve decap
+ add_flow_action(actions, RTE_FLOW_ACTION_TYPE_GENEVE_DECAP, NULL);
+ */
+ add_flow_action(actions, RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, NULL);
+}
+
static int
parse_clone_actions(struct netdev *netdev,
struct flow_actions *actions,
@@ -1261,6 +1428,9 @@ parse_clone_actions(struct netdev *netdev,
if (tnl_push->tnl_type == OVS_VPORT_TYPE_VXLAN &&
!add_vxlan_encap_action(actions, tnl_push->header)) {
continue;
+ } else if (tnl_push->tnl_type == OVS_VPORT_TYPE_GENEVE &&
+ !add_geneve_encap_action(actions, tnl_push->header)) {
+ continue;
}
} else if (clone_type == OVS_ACTION_ATTR_OUTPUT) {
if (add_output_action(netdev, actions, ca, info)) {
@@ -1301,6 +1471,8 @@ parse_flow_actions(struct netdev *netdev,
if (nl_actions_len != 0 && !strcmp(netdev_get_type(netdev), "vxlan")) {
add_vxlan_decap_action(actions);
+ } else if (nl_actions_len && !strcmp(netdev_get_type(netdev), "geneve")) {
+ add_geneve_decap_action(actions);
}
add_count_action(actions);
NL_ATTR_FOR_EACH_UNSAFE (nla, left, nl_actions, nl_actions_len) {
From: Rongyin <rongyin@cmss.chinamobile.com> NOTE: there are some TBDs that still need to be fixed Code Source From: Self Code Description: Add GENEVE tunnel push/pop offload (without option headers) Jira: #[Optional] 市场项目编号(名称):[Optional] --- lib/netdev-offload-dpdk.c | 172 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 172 insertions(+)