
[ovs-dev,4/4] netdev-offload-dpdk: Add support for matching on gre fields

Message ID 1631606713-20513-5-git-send-email-nanteby@nvidia.com
State Superseded
Series netdev datapath hardware offload support for GRE flows

Checks

Context                                 Check    Description
ovsrobot/apply-robot                    success  apply and check: success
ovsrobot/github-robot-_Build_and_Test   success  github build: passed

Commit Message

Nir Anteby Sept. 14, 2021, 8:05 a.m. UTC
Add parsing of GRE match fields.

Signed-off-by: Nir Anteby <nanteby@nvidia.com>
---
 NEWS                      |  2 +
 lib/netdev-offload-dpdk.c | 95 +++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 96 insertions(+), 1 deletion(-)
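
For readers coming at this from the rte_flow side, here is a sketch (not
part of the patch; the function name and values are illustrative) of the
two items the new parse_gre_match() appends for a GRE tunnel whose 32-bit
key is matched exactly, after parse_tnl_ip_match() has added the outer
Ethernet/IP items.  Passing a struct rte_gre_hdr as the GRE item's spec
relies on it sharing the layout of struct rte_flow_item_gre, the same
assumption the patch itself makes:

    #include <stdint.h>
    #include <rte_byteorder.h>
    #include <rte_flow.h>
    #include <rte_gre.h>

    /* Illustrative only -- not part of the patch. */
    static void
    build_gre_items_example(struct rte_flow_item items[3])
    {
        static struct rte_gre_hdr gre_spec, gre_mask;
        static rte_be32_t key_spec = RTE_BE32(100);        /* key 100 */
        static rte_be32_t key_mask = RTE_BE32(UINT32_MAX); /* exact match */

        gre_spec.k = 1;    /* K bit: a key is present */
        gre_mask.k = 1;    /* ... and we match on that bit */

        items[0] = (struct rte_flow_item) {
            .type = RTE_FLOW_ITEM_TYPE_GRE,
            .spec = &gre_spec,
            .mask = &gre_mask,
        };
        items[1] = (struct rte_flow_item) {
            .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
            .spec = &key_spec,
            .mask = &key_mask,
        };
        items[2] = (struct rte_flow_item) {
            .type = RTE_FLOW_ITEM_TYPE_END,
        };
    }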

Patch

diff --git a/NEWS b/NEWS
index 1f2adf7..35b71f8 100644
--- a/NEWS
+++ b/NEWS
@@ -8,6 +8,8 @@  Post-v2.16.0
        by default.  'other_config:dpdk-socket-limit' can be set equal to
        the 'other_config:dpdk-socket-mem' to preserve the legacy memory
        limiting behavior.
+     * Add hardware offload support for GRE flows (experimental).
+       Available only if DPDK experimental APIs are enabled at build time.
 
 
 v2.16.0 - 16 Aug 2021
diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index d15302a..f08638f 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -19,6 +19,7 @@ 
 #include <sys/types.h>
 #include <netinet/ip6.h>
 #include <rte_flow.h>
+#include <rte_gre.h>
 
 #include "cmap.h"
 #include "dpif-netdev.h"
@@ -390,6 +391,46 @@  dump_flow_pattern(struct ds *s,
                               ntohl(spec_vni) >> 8, ntohl(mask_vni) >> 8);
         }
         ds_put_cstr(s, "/ ");
+    } else if (item->type == RTE_FLOW_ITEM_TYPE_GRE) {
+        const struct rte_flow_item_gre *gre_spec = item->spec;
+        const struct rte_flow_item_gre *gre_mask = item->mask;
+        const struct rte_gre_hdr *greh_spec, *greh_mask;
+        uint8_t c_bit_spec, c_bit_mask;
+        uint8_t k_bit_spec, k_bit_mask;
+
+        ds_put_cstr(s, "gre ");
+        if (gre_spec) {
+            if (!gre_mask) {
+                gre_mask = &rte_flow_item_gre_mask;
+            }
+            greh_spec = (const struct rte_gre_hdr *) gre_spec;
+            greh_mask = (const struct rte_gre_hdr *) gre_mask;
+
+            c_bit_spec = greh_spec->c;
+            c_bit_mask = greh_mask->c ? UINT8_MAX : 0;
+            DUMP_PATTERN_ITEM(c_bit_mask, "c_bit", "%"PRIu8,
+                              c_bit_spec, c_bit_mask);
+
+            k_bit_spec = greh_spec->k;
+            k_bit_mask = greh_mask->k ? UINT8_MAX : 0;
+            DUMP_PATTERN_ITEM(k_bit_mask, "k_bit", "%"PRIu8,
+                              k_bit_spec, k_bit_mask);
+        }
+        ds_put_cstr(s, "/ ");
+    } else if (item->type == RTE_FLOW_ITEM_TYPE_GRE_KEY) {
+        const rte_be32_t gre_mask = RTE_BE32(UINT32_MAX);
+        const rte_be32_t *key_spec = item->spec;
+        const rte_be32_t *key_mask = item->mask;
+
+        ds_put_cstr(s, "gre_key ");
+        if (key_spec) {
+            if (!key_mask) {
+                key_mask = &gre_mask;
+            }
+            DUMP_PATTERN_ITEM(*key_mask, "value", "%"PRIu32,
+                              ntohl(*key_spec), ntohl(*key_mask));
+        }
+        ds_put_cstr(s, "/ ");
     } else {
         ds_put_format(s, "unknown rte flow pattern (%d)\n", item->type);
     }
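
A note on the casts in the chunk above: struct rte_flow_item_gre (just
c_rsvd0_ver plus protocol) and struct rte_gre_hdr describe the same four
header bytes, so the cast reinterprets the bits rather than converting
anything; the rte_gre_hdr view merely adds named bitfields for the C/K/S
flags.  A stand-alone illustration (not part of the patch):

    #include <assert.h>
    #include <rte_byteorder.h>
    #include <rte_flow.h>
    #include <rte_gre.h>

    int
    main(void)
    {
        /* The K bit is 0x2000 in the first 16 bits of the GRE header. */
        struct rte_flow_item_gre item = {
            .c_rsvd0_ver = RTE_BE16(0x2000),
        };
        const struct rte_gre_hdr *hdr =
            (const struct rte_gre_hdr *) &item;

        assert(hdr->k == 1 && hdr->c == 0);  /* same bits, two views */
        return 0;
    }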
@@ -1028,6 +1069,57 @@  parse_vxlan_match(struct flow_patterns *patterns,
     return 0;
 }
 
+static int
+parse_gre_match(struct flow_patterns *patterns,
+                struct match *match)
+{
+    struct rte_flow_item_gre *gre_spec, *gre_mask;
+    struct rte_gre_hdr *greh_spec, *greh_mask;
+    rte_be32_t *key_spec, *key_mask;
+    struct flow *consumed_masks;
+    int ret;
+
+    ret = parse_tnl_ip_match(patterns, match, IPPROTO_GRE);
+    if (ret) {
+        return -1;
+    }
+
+    gre_spec = xzalloc(sizeof *gre_spec);
+    gre_mask = xzalloc(sizeof *gre_mask);
+    add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_GRE, gre_spec, gre_mask);
+
+    consumed_masks = &match->wc.masks;
+
+    greh_spec = (struct rte_gre_hdr *) gre_spec;
+    greh_mask = (struct rte_gre_hdr *) gre_mask;
+
+    if (match->wc.masks.tunnel.flags & FLOW_TNL_F_CSUM) {
+        greh_spec->c = !!(match->flow.tunnel.flags & FLOW_TNL_F_CSUM);
+        greh_mask->c = 1;
+        consumed_masks->tunnel.flags &= ~FLOW_TNL_F_CSUM;
+    }
+
+    if (match->wc.masks.tunnel.flags & FLOW_TNL_F_KEY) {
+        greh_spec->k = !!(match->flow.tunnel.flags & FLOW_TNL_F_KEY);
+        greh_mask->k = 1;
+
+        key_spec = xzalloc(sizeof *key_spec);
+        key_mask = xzalloc(sizeof *key_mask);
+
+        *key_spec = htonl(ntohll(match->flow.tunnel.tun_id));
+        *key_mask = htonl(ntohll(match->wc.masks.tunnel.tun_id));
+
+        consumed_masks->tunnel.tun_id = 0;
+        consumed_masks->tunnel.flags &= ~FLOW_TNL_F_KEY;
+        add_flow_pattern(patterns, RTE_FLOW_ITEM_TYPE_GRE_KEY, key_spec,
+                         key_mask);
+    }
+
+    consumed_masks->tunnel.flags &= ~FLOW_TNL_F_DONT_FRAGMENT;
+
+    return 0;
+}
+
 static int OVS_UNUSED
 parse_flow_tnl_match(struct netdev *tnldev,
                      struct flow_patterns *patterns,
@@ -1044,6 +1136,8 @@  parse_flow_tnl_match(struct netdev *tnldev,
     if (!strcmp(netdev_get_type(tnldev), "vxlan")) {
         ret = parse_vxlan_match(patterns, match);
-    }
+    } else if (!strcmp(netdev_get_type(tnldev), "gre")) {
+        ret = parse_gre_match(patterns, match);
+    }
 
     return ret;
 }
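
For context on the input side: a match on a GRE tunnel arrives here with
FLOW_TNL_F_KEY and tun_id set in struct match, which is what
parse_gre_match() consumes above.  A minimal sketch of building such a
match, assuming the usual OVS match helpers (illustrative, not part of
the patch):

    #include "openvswitch/match.h"
    #include "byte-order.h"    /* htonll() */

    /* Illustrative only: a catch-all match narrowed to GRE key 100. */
    static void
    make_gre_match_example(struct match *match)
    {
        match_init_catchall(match);
        match_set_tun_id(match, htonll(100));        /* exact key match */
        match_set_tun_flags(match, FLOW_TNL_F_KEY);  /* key must be present */
    }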