[ovs-dev,RFC,v4,1/1] netdev-dpdk: Add support for DPDK 16.07

Message ID 1469798820-2042-2-git-send-email-ciara.loftus@intel.com
State Superseded
Delegated to: Daniele Di Proietto

Commit Message

Ciara Loftus July 29, 2016, 1:27 p.m. UTC
This commit introduces support for DPDK 16.07 and consequently breaks
compatibility with DPDK 16.04.

DPDK 16.07 introduces some changes to the xstats API. OVS has been
modified to comply with the changes.
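
To illustrate the API change for reviewers, here is a minimal sketch of the
two-step retrieval that the 16.07 xstats API requires: counter names and
values are now fetched by separate calls and matched by array index. The
print_xstats() helper and its abbreviated error handling are illustrative
only and not part of this patch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    static void
    print_xstats(uint8_t port_id)
    {
        /* Passing a NULL array returns only the number of counters. */
        int len = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (len < 0) {
            return;
        }

        struct rte_eth_xstat_name *names = calloc(len, sizeof *names);
        struct rte_eth_xstat *values = calloc(len, sizeof *values);

        if (rte_eth_xstats_get_names(port_id, names, len) == len) {
            int ret = rte_eth_xstats_get(port_id, values, len);
            if (ret > 0 && ret <= len) {
                for (int i = 0; i < ret; i++) {
                    /* names[i] describes values[i]. */
                    printf("%s: %" PRIu64 "\n", names[i].name,
                           values[i].value);
                }
            }
        }

        free(names);
        free(values);
    }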

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Tested-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Tested-by: Robert Wojciechowicz <robertx.wojciechowicz@intel.com>
---
 .travis/linux-build.sh   |   2 +-
 INSTALL.DPDK-ADVANCED.md |   6 +--
 INSTALL.DPDK.md          |  20 +++++-----
 NEWS                     |   1 +
 lib/netdev-dpdk.c        | 101 +++++++++++++++++++++++++----------------------
 5 files changed, 69 insertions(+), 61 deletions(-)

Patch

diff --git a/.travis/linux-build.sh b/.travis/linux-build.sh
index 065de39..1b3d43d 100755
--- a/.travis/linux-build.sh
+++ b/.travis/linux-build.sh
@@ -68,7 +68,7 @@  fi
 
 if [ "$DPDK" ]; then
     if [ -z "$DPDK_VER" ]; then
-        DPDK_VER="16.04"
+        DPDK_VER="16.07"
     fi
     install_dpdk $DPDK_VER
     if [ "$CC" = "clang" ]; then
diff --git a/INSTALL.DPDK-ADVANCED.md b/INSTALL.DPDK-ADVANCED.md
index 28d5d67..ca9e9bb 100644
--- a/INSTALL.DPDK-ADVANCED.md
+++ b/INSTALL.DPDK-ADVANCED.md
@@ -43,7 +43,7 @@  for DPDK and OVS.
     For IVSHMEM case, set `export DPDK_TARGET=x86_64-ivshmem-linuxapp-gcc`
 
     ```
-    export DPDK_DIR=/usr/src/dpdk-16.04
+    export DPDK_DIR=/usr/src/dpdk-16.07
     export DPDK_BUILD=$DPDK_DIR/$DPDK_TARGET
     make install T=$DPDK_TARGET DESTDIR=install
     ```
@@ -339,7 +339,7 @@  For users wanting to do packet forwarding using kernel stack below are the steps
        cd /usr/src/cmdline_generator
        wget https://raw.githubusercontent.com/netgroup-polito/un-orchestrator/master/orchestrator/compute_controller/plugins/kvm-libvirt/cmdline_generator/cmdline_generator.c
        wget https://raw.githubusercontent.com/netgroup-polito/un-orchestrator/master/orchestrator/compute_controller/plugins/kvm-libvirt/cmdline_generator/Makefile
-       export RTE_SDK=/usr/src/dpdk-16.04
+       export RTE_SDK=/usr/src/dpdk-16.07
        export RTE_TARGET=x86_64-ivshmem-linuxapp-gcc
        make
        ./build/cmdline_generator -m -p dpdkr0 XXX
@@ -363,7 +363,7 @@  For users wanting to do packet forwarding using kernel stack below are the steps
        mount -t hugetlbfs nodev /dev/hugepages (if not already mounted)
 
        # Build the DPDK ring application in the VM
-       export RTE_SDK=/root/dpdk-16.04
+       export RTE_SDK=/root/dpdk-16.07
        export RTE_TARGET=x86_64-ivshmem-linuxapp-gcc
        make
 
diff --git a/INSTALL.DPDK.md b/INSTALL.DPDK.md
index 312d7f5..d9b1aba 100644
--- a/INSTALL.DPDK.md
+++ b/INSTALL.DPDK.md
@@ -21,7 +21,7 @@  The DPDK support of Open vSwitch is considered 'experimental'.
 
 ### Prerequisites
 
-* Required: DPDK 16.04
+* Required: DPDK 16.07
 * Hardware: [DPDK Supported NICs] when physical ports in use
 
 ## <a name="build"></a> 2. Building and Installation
@@ -42,10 +42,10 @@  advanced install guide [INSTALL.DPDK-ADVANCED.md]
 
      ```
      cd /usr/src/
-     wget http://dpdk.org/browse/dpdk/snapshot/dpdk-16.04.zip
-     unzip dpdk-16.04.zip
+     wget http://dpdk.org/browse/dpdk/snapshot/dpdk-16.07.zip
+     unzip dpdk-16.07.zip
 
-     export DPDK_DIR=/usr/src/dpdk-16.04
+     export DPDK_DIR=/usr/src/dpdk-16.07
      cd $DPDK_DIR
      ```
 
@@ -372,9 +372,9 @@  can be found in [Vhost Walkthrough].
 
   ```
   cd /root/dpdk/
-  wget http://dpdk.org/browse/dpdk/snapshot/dpdk-16.04.zip
-  unzip dpdk-16.04.zip
-  export DPDK_DIR=/root/dpdk/dpdk-16.04
+  wget http://dpdk.org/browse/dpdk/snapshot/dpdk-16.07.zip
+  unzip dpdk-16.07.zip
+  export DPDK_DIR=/root/dpdk/dpdk-16.07
   export DPDK_TARGET=x86_64-native-linuxapp-gcc
   export DPDK_BUILD=$DPDK_DIR/$DPDK_TARGET
   cd $DPDK_DIR
@@ -530,7 +530,7 @@  can be found in [Vhost Walkthrough].
            </disk>
            <disk type='dir' device='disk'>
              <driver name='qemu' type='fat'/>
-             <source dir='/usr/src/dpdk-16.04'/>
+             <source dir='/usr/src/dpdk-16.07'/>
              <target dev='vdb' bus='virtio'/>
              <readonly/>
            </disk>
@@ -600,9 +600,9 @@  can be found in [Vhost Walkthrough].
     DPDK. It is recommended that users update Network Interface firmware to
     match what has been validated for the DPDK release.
 
-    For DPDK 16.04, the list of validated firmware versions can be found at:
+    For DPDK 16.07, the list of validated firmware versions can be found at:
 
-    http://dpdk.org/doc/guides/rel_notes/release_16_04.html
+    http://dpdk.org/doc/guides/rel_notes/release_16_07.html
 
   - dpdk, dpdkr and dpdkvhostuser ports are 'eth' type ports in the context of
     DPDK as they are all managed by the rte_ether API. This means that they
diff --git a/NEWS b/NEWS
index 6510dde..248d856 100644
--- a/NEWS
+++ b/NEWS
@@ -66,6 +66,7 @@  Post-v2.5.0
      * Remove dpdkvhostcuse port type.
      * vHost PMD integration brings vhost-user ports under control of the
        rte_ether DPDK API.
+     * Support for DPDK 16.07.
    - Increase number of registers to 16.
    - ovs-benchmark: This utility has been removed due to lack of use and
      bitrot.
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index f843902..07e6045 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -139,6 +139,7 @@  BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
 static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */
 
 #define VHOST_ENQ_RETRY_NUM 8
+#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
 
 /* Array that tracks the used & unused vHost user driver IDs */
 static unsigned int vhost_drv_ids[RTE_MAX_ETHPORTS];
@@ -759,6 +760,7 @@  link_status_changed_callback(uint8_t port_id,
             } else {
                 /* destroy device */
                 /* Clear tx/rx queue settings. */
+                dev->vhost_qp_nb = 0;
                 netdev_dpdk_txq_map_clear(dev);
                 netdev_request_reconfigure(&dev->up);
                 netdev_change_seq_changed(&dev->up);
@@ -1581,60 +1583,50 @@  netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
 
 static void
 netdev_dpdk_convert_xstats(struct netdev_stats *stats,
-                           const struct rte_eth_xstats *xstats,
+                           const struct rte_eth_xstat *xstats,
+                           const struct rte_eth_xstat_name *names,
                            const unsigned int size)
 {
-    /* XXX Current implementation is simple search through an array
-     * to find hardcoded counter names. In future DPDK release (TBD)
-     * XSTATS API will change so each counter will be represented by
-     * unique ID instead of String. */
-
     for (unsigned int i = 0; i < size; i++) {
-        if (strcmp(XSTAT_RX_64_PACKETS, xstats[i].name) == 0) {
+        if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
             stats->rx_1_to_64_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
             stats->rx_65_to_127_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
             stats->rx_128_to_255_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
             stats->rx_256_to_511_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS,
-                          xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
             stats->rx_512_to_1023_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS,
-                          xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
             stats->rx_1024_to_1522_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS,
-                          xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
             stats->rx_1523_to_max_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_TX_64_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
             stats->tx_1_to_64_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
             stats->tx_65_to_127_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
             stats->tx_128_to_255_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
             stats->tx_256_to_511_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS,
-                          xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
             stats->tx_512_to_1023_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS,
-                          xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
             stats->tx_1024_to_1522_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS,
-                          xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
             stats->tx_1523_to_max_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
             stats->tx_multicast_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
             stats->rx_broadcast_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
             stats->tx_broadcast_packets = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
             stats->rx_undersized_errors = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
             stats->rx_fragmented_errors = xstats[i].value;
-        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, xstats[i].name) == 0) {
+        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
             stats->rx_jabber_errors = xstats[i].value;
         }
     }
@@ -1650,7 +1642,8 @@  netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
     netdev_dpdk_get_carrier(netdev, &gg);
     ovs_mutex_lock(&dev->mutex);
 
-    struct rte_eth_xstats *rte_xstats;
+    struct rte_eth_xstat *rte_xstats = NULL;
+    struct rte_eth_xstat_name *rte_xstats_names = NULL;
     int rte_xstats_len, rte_xstats_ret;
 
     if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
@@ -1661,22 +1654,38 @@  netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
 
     /* Extended statistics are not yet available for vHost User PMD */
     if (dev->type == DPDK_DEV_ETH) {
-        rte_xstats_len = rte_eth_xstats_get(dev->port_id, NULL, 0);
-        if (rte_xstats_len > 0) {
-            rte_xstats = dpdk_rte_mzalloc(sizeof(*rte_xstats)
-                                          * rte_xstats_len);
-            memset(rte_xstats, 0xff, sizeof(*rte_xstats) * rte_xstats_len);
-            rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
-                                                rte_xstats_len);
-            if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
-                netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_ret);
-            }
-            rte_free(rte_xstats);
+        /* Get length of statistics */
+        rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
+        if (rte_xstats_len < 0) {
+            VLOG_WARN("Cannot get XSTATS values for port: %i", dev->port_id);
+            goto out;
+        }
+        /* Reserve memory for xstats names and values */
+        rte_xstats_names = xcalloc(rte_xstats_len, sizeof(*rte_xstats_names));
+        rte_xstats = xcalloc(rte_xstats_len, sizeof(*rte_xstats));
+
+        /* Retrieve xstats names */
+        if (rte_xstats_len != rte_eth_xstats_get_names(dev->port_id,
+                                           rte_xstats_names, rte_xstats_len)) {
+            VLOG_WARN("Cannot get XSTATS names for port: %i.", dev->port_id);
+            goto out;
+        }
+        /* Retrieve xstats values */
+        memset(rte_xstats, 0xff, sizeof(*rte_xstats) * rte_xstats_len);
+        rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
+                                            rte_xstats_len);
+        if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
+            netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
+                                       rte_xstats_len);
         } else {
-            VLOG_WARN("Can't get XSTATS counters for port: %i.", dev->port_id);
+            VLOG_WARN("Cannot get XSTATS values for port: %i.", dev->port_id);
         }
     }
 
+out:
+    free(rte_xstats);
+    free(rte_xstats_names);
+
     stats->rx_packets = rte_stats.ipackets;
     stats->tx_packets = rte_stats.opackets;
     stats->rx_bytes = rte_stats.ibytes;
@@ -1687,11 +1696,9 @@  netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
          * instead */
         stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
         stats->tx_errors = rte_stats.oerrors;
-        stats->multicast = rte_stats.imcasts;
     } else {
         stats->rx_errors = UINT64_MAX;
         stats->tx_errors = UINT64_MAX;
-        stats->multicast = UINT64_MAX;
     }
 
     rte_spinlock_lock(&dev->stats_lock);