[ovs-dev,v4,4/5] netdev-dpdk: add debug for rte flow patterns

Message ID 1511768584-19167-5-git-send-email-yliu@fridaylinux.org
State New
Delegated to: Ian Stokes
Headers show
Series
  • OVS-DPDK flow offload with rte_flow
Related show

Commit Message

Yuanhan Liu Nov. 27, 2017, 7:43 a.m.
The log level will be set to DBG when this patchset is close to
being merged.

Co-authored-by: Finn Christensen <fc@napatech.com>
Signed-off-by: Yuanhan Liu <yliu@fridaylinux.org>
Signed-off-by: Finn Christensen <fc@napatech.com>
---
 lib/netdev-dpdk.c | 177 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 177 insertions(+)

Patch

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index b00fc5e..84660ad 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -3394,6 +3394,182 @@  struct flow_actions {
 };
 
 static void
+/* Log the spec and mask of a single rte_flow pattern item for debugging.
+ * Note: vlog appends a newline to every message, so none is embedded here. */
+dump_flow_pattern(struct rte_flow_item *item)
+{
+    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+        const struct rte_flow_item_eth *eth_spec = item->spec;
+        const struct rte_flow_item_eth *eth_mask = item->mask;
+
+        VLOG_INFO("rte flow eth pattern:");
+        if (eth_spec) {
+            VLOG_INFO("  spec: src="ETH_ADDR_FMT", dst="ETH_ADDR_FMT", "
+                     "type=0x%04" PRIx16,
+                     eth_spec->src.addr_bytes[0], eth_spec->src.addr_bytes[1],
+                     eth_spec->src.addr_bytes[2], eth_spec->src.addr_bytes[3],
+                     eth_spec->src.addr_bytes[4], eth_spec->src.addr_bytes[5],
+                     eth_spec->dst.addr_bytes[0], eth_spec->dst.addr_bytes[1],
+                     eth_spec->dst.addr_bytes[2], eth_spec->dst.addr_bytes[3],
+                     eth_spec->dst.addr_bytes[4], eth_spec->dst.addr_bytes[5],
+                     ntohs(eth_spec->type));
+        } else {
+            VLOG_INFO("  spec = null");
+        }
+        if (eth_mask) {
+            VLOG_INFO("  mask: src="ETH_ADDR_FMT", dst="ETH_ADDR_FMT", "
+                     "type=0x%04"PRIx16,
+                     eth_mask->src.addr_bytes[0], eth_mask->src.addr_bytes[1],
+                     eth_mask->src.addr_bytes[2], eth_mask->src.addr_bytes[3],
+                     eth_mask->src.addr_bytes[4], eth_mask->src.addr_bytes[5],
+                     eth_mask->dst.addr_bytes[0], eth_mask->dst.addr_bytes[1],
+                     eth_mask->dst.addr_bytes[2], eth_mask->dst.addr_bytes[3],
+                     eth_mask->dst.addr_bytes[4], eth_mask->dst.addr_bytes[5],
+                     eth_mask->type);
+        } else {
+            VLOG_INFO("  mask = null");
+        }
+    }
+
+    if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+        const struct rte_flow_item_vlan *vlan_spec = item->spec;
+        const struct rte_flow_item_vlan *vlan_mask = item->mask;
+
+        VLOG_INFO("rte flow vlan pattern:");
+        if (vlan_spec) {
+            VLOG_INFO("  spec: tpid=0x%"PRIx16", tci=0x%"PRIx16,
+                     ntohs(vlan_spec->tpid), ntohs(vlan_spec->tci));
+        } else {
+            VLOG_INFO("  spec = null");
+        }
+
+        if (vlan_mask) {
+            VLOG_INFO("  mask: tpid=0x%"PRIx16", tci=0x%"PRIx16,
+                     vlan_mask->tpid, vlan_mask->tci);
+        } else {
+            VLOG_INFO("  mask = null");
+        }
+    }
+
+    if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+        const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
+        const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
+
+        VLOG_INFO("rte flow ipv4 pattern:");
+        if (ipv4_spec) {
+            VLOG_INFO("  spec: tos=0x%"PRIx8", ttl=%"PRIx8", proto=0x%"PRIx8
+                     ", src="IP_FMT", dst="IP_FMT,
+                     ipv4_spec->hdr.type_of_service,
+                     ipv4_spec->hdr.time_to_live,
+                     ipv4_spec->hdr.next_proto_id,
+                     IP_ARGS(ipv4_spec->hdr.src_addr),
+                     IP_ARGS(ipv4_spec->hdr.dst_addr));
+        } else {
+            VLOG_INFO("  spec = null");
+        }
+        if (ipv4_mask) {
+            VLOG_INFO("  mask: tos=0x%"PRIx8", ttl=%"PRIx8", proto=0x%"PRIx8
+                     ", src="IP_FMT", dst="IP_FMT,
+                     ipv4_mask->hdr.type_of_service,
+                     ipv4_mask->hdr.time_to_live,
+                     ipv4_mask->hdr.next_proto_id,
+                     IP_ARGS(ipv4_mask->hdr.src_addr),
+                     IP_ARGS(ipv4_mask->hdr.dst_addr));
+        } else {
+            VLOG_INFO("  mask = null");
+        }
+    }
+
+    if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+        const struct rte_flow_item_udp *udp_spec = item->spec;
+        const struct rte_flow_item_udp *udp_mask = item->mask;
+
+        VLOG_INFO("rte flow udp pattern:");
+        if (udp_spec) {
+            VLOG_INFO("  spec: src_port=%"PRIu16", dst_port=%"PRIu16,
+                     ntohs(udp_spec->hdr.src_port),
+                     ntohs(udp_spec->hdr.dst_port));
+        } else {
+            VLOG_INFO("  spec = null");
+        }
+        if (udp_mask) {
+            VLOG_INFO("  mask: src_port=0x%"PRIx16", dst_port=0x%"PRIx16,
+                     udp_mask->hdr.src_port,
+                     udp_mask->hdr.dst_port);
+        } else {
+            VLOG_INFO("  mask = null");
+        }
+    }
+
+    if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+        const struct rte_flow_item_sctp *sctp_spec = item->spec;
+        const struct rte_flow_item_sctp *sctp_mask = item->mask;
+
+        VLOG_INFO("rte flow sctp pattern:");
+        if (sctp_spec) {
+            VLOG_INFO("  spec: src_port=%"PRIu16", dst_port=%"PRIu16,
+                     ntohs(sctp_spec->hdr.src_port),
+                     ntohs(sctp_spec->hdr.dst_port));
+        } else {
+            VLOG_INFO("  spec = null");
+        }
+        if (sctp_mask) {
+            VLOG_INFO("  mask: src_port=0x%"PRIx16", dst_port=0x%"PRIx16,
+                     sctp_mask->hdr.src_port,
+                     sctp_mask->hdr.dst_port);
+        } else {
+            VLOG_INFO("  mask = null");
+        }
+    }
+
+    if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
+        const struct rte_flow_item_icmp *icmp_spec = item->spec;
+        const struct rte_flow_item_icmp *icmp_mask = item->mask;
+
+        VLOG_INFO("rte flow icmp pattern:");
+        if (icmp_spec) {
+            /* icmp_type/icmp_code are single bytes: no byte-order swap. */
+            VLOG_INFO("  spec: icmp_type=%"PRIu8", icmp_code=%"PRIu8,
+                     icmp_spec->hdr.icmp_type,
+                     icmp_spec->hdr.icmp_code);
+        } else {
+            VLOG_INFO("  spec = null");
+        }
+        if (icmp_mask) {
+            VLOG_INFO("  mask: icmp_type=0x%"PRIx8", icmp_code=0x%"PRIx8,
+                     icmp_mask->hdr.icmp_type,
+                     icmp_mask->hdr.icmp_code);
+        } else {
+            VLOG_INFO("  mask = null");
+        }
+    }
+
+    if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+        const struct rte_flow_item_tcp *tcp_spec = item->spec;
+        const struct rte_flow_item_tcp *tcp_mask = item->mask;
+
+        VLOG_INFO("rte flow tcp pattern:");
+        if (tcp_spec) {
+            VLOG_INFO("  spec: src_port=%"PRIu16", dst_port=%"PRIu16
+                     ", data_off=0x%"PRIx8", tcp_flags=0x%"PRIx8,
+                     ntohs(tcp_spec->hdr.src_port),
+                     ntohs(tcp_spec->hdr.dst_port),
+                     tcp_spec->hdr.data_off,
+                     tcp_spec->hdr.tcp_flags);
+        } else {
+            VLOG_INFO("  spec = null");
+        }
+        if (tcp_mask) {
+            VLOG_INFO("  mask: src_port=0x%"PRIx16", dst_port=0x%"PRIx16
+                     ", data_off=0x%"PRIx8", tcp_flags=0x%"PRIx8,
+                     tcp_mask->hdr.src_port,
+                     tcp_mask->hdr.dst_port,
+                     tcp_mask->hdr.data_off,
+                     tcp_mask->hdr.tcp_flags);
+        } else {
+            VLOG_INFO("  mask = null");
+        }
+    }
+}
+
+static void
 add_flow_pattern(struct flow_patterns *patterns, enum rte_flow_item_type type,
                  const void *spec, const void *mask)
 {
@@ -3412,6 +3588,7 @@  add_flow_pattern(struct flow_patterns *patterns, enum rte_flow_item_type type,
     patterns->items[cnt].spec = spec;
     patterns->items[cnt].mask = mask;
     patterns->items[cnt].last = NULL;
+    dump_flow_pattern(&patterns->items[cnt]);
     patterns->cnt++;
 }