diff mbox series

[ovs-dev,v2] Detailed packet drop statistics per dpdk and vhostuser ports

Message ID 1560519499-8239-1-git-send-email-sriram.v@altencalsoftlabs.com
State Changes Requested
Delegated to: Ilya Maximets
Headers show
Series [ovs-dev,v2] Detailed packet drop statistics per dpdk and vhostuser ports | expand

Commit Message

Li,Rongqing via dev June 14, 2019, 1:38 p.m. UTC
OVS may be unable to transmit packets for multiple reasons and
today there is a single counter to track packets dropped due to
any of those reasons. The most common reason is that a VM is
unable to read packets fast enough causing the vhostuser port
transmit queue on the OVS side to become full. This manifests
as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the
transmit queue is full will clearly indicate that the problem
is on the VM side and not in OVS. Similarly maintaining separate
counters for all possible drops helps in indicating sensible
cause for packet drops.

This patch adds counters to track packets dropped due to all
possible reasons and these counters are displayed along with
other stats in "ovs-vsctl get interface <iface> statistics"
command. The detailed stats will be available for both dpdk and
vhostuser ports.

Following are the details of the new counters :

tx_qfull_drops : These are the packets dropped due to
transmit queue overrun.

tx_mtu_exceeded_drops : These are the packets dropped due
to MTU mismatch (i.e Pkt len > Max Dev MTU).

tx_qos_drops/rx_qos_drops : These are the packets dropped due
to transmission/reception rate exceeding the configured
Egress/Ingress policer rate on the interface.

Signed-off-by: Sriram Vatala <sriram.v@altencalsoftlabs.com>
---
 include/openvswitch/netdev.h                       |  8 ++++
 lib/netdev-dpdk.c                                  | 49 ++++++++++++++++++----
 utilities/bugtool/automake.mk                      |  3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats      | 25 +++++++++++
 .../bugtool/plugins/network-status/openvswitch.xml |  3 ++
 vswitchd/bridge.c                                  |  6 ++-
 6 files changed, 85 insertions(+), 9 deletions(-)
 create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats

Comments

Li,Rongqing via dev June 18, 2019, 6:05 a.m. UTC | #1
Please consider this as a gentle reminder.

Thanks & Regards,
Sriram.

-----Original Message-----
From: Sriram Vatala <sriram.v@altencalsoftlabs.com> 
Sent: 14 June 2019 19:08
To: ovs-dev@openvswitch.org
Cc: Sriram Vatala <sriram.v@altencalsoftlabs.com>
Subject: [PATCH v2] Detailed packet drop statistics per dpdk and vhostuser
ports

OVS may be unable to transmit packets for multiple reasons and today there
is a single counter to track packets dropped due to any of those reasons.
The most common reason is that a VM is unable to read packets fast enough
causing the vhostuser port transmit queue on the OVS side to become full.
This manifests as a problem with VNFs not receiving all packets. Having a
separate drop counter to track packets dropped because the transmit queue is
full will clearly indicate that the problem is on the VM side and not in
OVS. Similarly maintaining separate counters for all possible drops helps in
indicating sensible cause for packet drops.

This patch adds counters to track packets dropped due to all possible
reasons and these counters are displayed along with other stats in
"ovs-vsctl get interface <iface> statistics"
command. The detailed stats will be available for both dpdk and vhostuser
ports.

Following are the details of the new counters :

tx_qfull_drops : These are the packets dropped due to transmit queue
overrun.

tx_mtu_exceeded_drops : These are the packets dropped due to MTU mismatch
(i.e Pkt len > Max Dev MTU).

tx_qos_drops/rx_qos_drops : These are the packets dropped due to
transmission/reception rate exceeding the configured Egress/Ingress policer
rate on the interface.

Signed-off-by: Sriram Vatala <sriram.v@altencalsoftlabs.com>
---
 include/openvswitch/netdev.h                       |  8 ++++
 lib/netdev-dpdk.c                                  | 49
++++++++++++++++++----
 utilities/bugtool/automake.mk                      |  3 +-
 utilities/bugtool/ovs-bugtool-get-iface-stats      | 25 +++++++++++
 .../bugtool/plugins/network-status/openvswitch.xml |  3 ++
 vswitchd/bridge.c                                  |  6 ++-
 6 files changed, 85 insertions(+), 9 deletions(-)  create mode 100755
utilities/bugtool/ovs-bugtool-get-iface-stats

diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
index 0c10f7b..69480a4 100644
--- a/include/openvswitch/netdev.h
+++ b/include/openvswitch/netdev.h
@@ -61,6 +61,14 @@ struct netdev_stats {
     uint64_t tx_heartbeat_errors;
     uint64_t tx_window_errors;
 
+    /* Detailed receive drops. */
+    uint64_t rx_qos_drops;
+
+    /* Detailed transmit drops. */
+    uint64_t tx_qfull_drops;
+    uint64_t tx_qos_drops;
+    uint64_t tx_mtu_drops;
+
     /* Extended statistics based on RFC2819. */
     uint64_t rx_1_to_64_packets;
     uint64_t rx_65_to_127_packets;
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index 3498b32..6c2eb38
100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -2124,6 +2124,7 @@ netdev_dpdk_vhost_update_rx_counters(struct
netdev_stats *stats,
 
     stats->rx_packets += count;
     stats->rx_dropped += dropped;
+    stats->rx_qos_drops += dropped;
     for (i = 0; i < count; i++) {
         packet = packets[i];
         packet_size = dp_packet_size(packet);
@@ -2236,6 +2237,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch,
     if (OVS_UNLIKELY(dropped)) {
         rte_spinlock_lock(&dev->stats_lock);
         dev->stats.rx_dropped += dropped;
+        dev->stats.rx_qos_drops += dropped;
         rte_spinlock_unlock(&dev->stats_lock);
     }
 
@@ -2319,6 +2321,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int
qid,
     struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
     unsigned int total_pkts = cnt;
     unsigned int dropped = 0;
+    unsigned int mtu_drops;
+    unsigned int qos_drops;
+    unsigned int qfull_drops;
     int i, retries = 0;
     int vid = netdev_dpdk_get_vid(dev);
 
@@ -2335,9 +2340,11 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int
qid,
     rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 
     cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
+    mtu_drops = total_pkts - cnt;
+    qos_drops = cnt;
     /* Check has QoS has been configured for the netdev */
     cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
-    dropped = total_pkts - cnt;
+    qos_drops -= cnt;
 
     do {
         int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ; @@ -2357,9 +2364,14
@@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 
     rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
 
+    qfull_drops = cnt;
+    dropped = mtu_drops + qos_drops + qfull_drops;
     rte_spinlock_lock(&dev->stats_lock);
     netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
-                                         cnt + dropped);
+                                         dropped);
+    dev->stats.tx_mtu_drops += mtu_drops;
+    dev->stats.tx_qos_drops += qos_drops;
+    dev->stats.tx_qfull_drops += qfull_drops;
     rte_spinlock_unlock(&dev->stats_lock);
 
 out:
@@ -2384,12 +2396,15 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid,
struct dp_packet_batch *batch)
     struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
     uint32_t cnt = batch_cnt;
     uint32_t dropped = 0;
+    uint32_t mtu_drops = 0;
+    uint32_t qos_drops = 0;
+    uint32_t qfull_drops = 0;
 
     if (dev->type != DPDK_DEV_VHOST) {
         /* Check if QoS has been configured for this netdev. */
         cnt = netdev_dpdk_qos_run(dev, (struct rte_mbuf **) batch->packets,
                                   batch_cnt, false);
-        dropped += batch_cnt - cnt;
+        qos_drops = batch_cnt - cnt;
     }
 
     uint32_t txcnt = 0;
@@ -2402,7 +2417,7 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct
dp_packet_batch *batch)
             VLOG_WARN_RL(&rl, "Too big size %u max_packet_len %d",
                          size, dev->max_packet_len);
 
-            dropped++;
+            mtu_drops++;
             continue;
         }
 
@@ -2425,13 +2440,17 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid,
struct dp_packet_batch *batch)
             __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **)
pkts,
                                      txcnt);
         } else {
-            dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, txcnt);
+            qfull_drops = netdev_dpdk_eth_tx_burst(dev, qid, pkts, txcnt);
         }
     }
 
+    dropped += mtu_drops + qos_drops + qfull_drops;
     if (OVS_UNLIKELY(dropped)) {
         rte_spinlock_lock(&dev->stats_lock);
         dev->stats.tx_dropped += dropped;
+        dev->stats.tx_mtu_drops += mtu_drops;
+        dev->stats.tx_qos_drops += qos_drops;
+        dev->stats.tx_qfull_drops += qfull_drops;
         rte_spinlock_unlock(&dev->stats_lock);
     }
 }
@@ -2473,18 +2492,25 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
         dp_packet_delete_batch(batch, true);
     } else {
         int tx_cnt, dropped;
+        int mtu_drops, qfull_drops, qos_drops;
         int batch_cnt = dp_packet_batch_size(batch);
         struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;
 
         tx_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt);
+        mtu_drops = batch_cnt - tx_cnt;
+        qos_drops = tx_cnt;
         tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
-        dropped = batch_cnt - tx_cnt;
+        qos_drops -= tx_cnt;
 
-        dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
+        qfull_drops = netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
 
+        dropped = mtu_drops + qos_drops + qfull_drops;
         if (OVS_UNLIKELY(dropped)) {
             rte_spinlock_lock(&dev->stats_lock);
             dev->stats.tx_dropped += dropped;
+            dev->stats.tx_mtu_drops += mtu_drops;
+            dev->stats.tx_qos_drops += qos_drops;
+            dev->stats.tx_qfull_drops += qfull_drops;
             rte_spinlock_unlock(&dev->stats_lock);
         }
     }
@@ -2598,6 +2624,11 @@ netdev_dpdk_vhost_get_stats(const struct netdev
*netdev,
     stats->rx_errors = dev->stats.rx_errors;
     stats->rx_length_errors = dev->stats.rx_length_errors;
 
+    stats->tx_mtu_drops = dev->stats.tx_mtu_drops;
+    stats->tx_qos_drops = dev->stats.tx_qos_drops;
+    stats->tx_qfull_drops = dev->stats.tx_qfull_drops;
+    stats->rx_qos_drops = dev->stats.rx_qos_drops;
+
     stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
     stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
     stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets; @@
-2733,6 +2764,10 @@ out:
     rte_spinlock_lock(&dev->stats_lock);
     stats->tx_dropped = dev->stats.tx_dropped;
     stats->rx_dropped = dev->stats.rx_dropped;
+    stats->tx_mtu_drops = dev->stats.tx_mtu_drops;
+    stats->tx_qos_drops = dev->stats.tx_qos_drops;
+    stats->tx_qfull_drops = dev->stats.tx_qfull_drops;
+    stats->rx_qos_drops = dev->stats.rx_qos_drops;
     rte_spinlock_unlock(&dev->stats_lock);
 
     /* These are the available DPDK counters for packets not received due
to diff --git a/utilities/bugtool/automake.mk
b/utilities/bugtool/automake.mk index 18fa347..9657468 100644
--- a/utilities/bugtool/automake.mk
+++ b/utilities/bugtool/automake.mk
@@ -22,7 +22,8 @@ bugtool_scripts = \
 	utilities/bugtool/ovs-bugtool-ovs-bridge-datapath-type \
 	utilities/bugtool/ovs-bugtool-ovs-vswitchd-threads-affinity \
 	utilities/bugtool/ovs-bugtool-qos-configs \
-	utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa
+	utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa \
+	utilities/bugtool/ovs-bugtool-get-iface-stats
 
 scripts_SCRIPTS += $(bugtool_scripts)
 
diff --git a/utilities/bugtool/ovs-bugtool-get-iface-stats
b/utilities/bugtool/ovs-bugtool-get-iface-stats
new file mode 100755
index 0000000..0fe175e
--- /dev/null
+++ b/utilities/bugtool/ovs-bugtool-get-iface-stats
@@ -0,0 +1,25 @@
+#! /bin/bash
+
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General
+# Public License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# Copyright (C) 2019 Ericsson AB
+
+for bridge in `ovs-vsctl -- --real list-br`
+do
+    echo -e "\nBridge : ${bridge}\n"
+    for iface in `ovs-vsctl list-ifaces ${bridge}`
+    do
+        echo -e "iface : ${iface}"
+        ovs-vsctl get interface ${iface} statistics
+        echo -e "\n"
+    done
+    echo -e "iface : ${bridge}"
+    ovs-vsctl get interface ${bridge} statistics
+done
diff --git a/utilities/bugtool/plugins/network-status/openvswitch.xml
b/utilities/bugtool/plugins/network-status/openvswitch.xml
index d39867c..327c006 100644
--- a/utilities/bugtool/plugins/network-status/openvswitch.xml
+++ b/utilities/bugtool/plugins/network-status/openvswitch.xml
@@ -34,6 +34,9 @@
     <command label="ovs-appctl-dpctl-dump-flows-netdev" filters="ovs"
repeat="2">ovs-appctl dpctl/dump-flows netdev@ovs-netdev</command>
     <command label="ovs-appctl-dpctl-dump-flows-system" filters="ovs"
repeat="2">ovs-appctl dpctl/dump-flows system@ovs-system</command>
     <command label="ovs-appctl-dpctl-show-s" filters="ovs"
repeat="2">ovs-appctl dpctl/show -s</command>
+    <command label="ovs-vsctl-get-interface-statistics"
+     filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-stats
+    </command>
     <command label="ovs-ofctl-show"
filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over
-bridges "show"</command>
     <command label="ovs-ofctl-dump-flows" filters="ovs"
repeat="2">/usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-br
idges "dump-flows"</command>
     <command label="ovs-ofctl-dump-ports" filters="ovs"
repeat="2">/usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-br
idges "dump-ports"</command> diff --git a/vswitchd/bridge.c
b/vswitchd/bridge.c index 0702cc6..2cb5558 100644
--- a/vswitchd/bridge.c
+++ b/vswitchd/bridge.c
@@ -2407,7 +2407,11 @@ iface_refresh_stats(struct iface *iface)
     IFACE_STAT(rx_undersized_errors,    "rx_undersized_errors")     \
     IFACE_STAT(rx_oversize_errors,      "rx_oversize_errors")       \
     IFACE_STAT(rx_fragmented_errors,    "rx_fragmented_errors")     \
-    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")
+    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")         \
+    IFACE_STAT(rx_qos_drops,            "rx_qos_drops")             \
+    IFACE_STAT(tx_qfull_drops,          "tx_qfull_drops")           \
+    IFACE_STAT(tx_qos_drops,            "tx_qos_drops")             \
+    IFACE_STAT(tx_mtu_drops,            "tx_mtu_exceeded_drops")
 
 #define IFACE_STAT(MEMBER, NAME) + 1
     enum { N_IFACE_STATS = IFACE_STATS };
--
2.7.4
Kevin Traynor June 18, 2019, 2:05 p.m. UTC | #2
Hi Sriram,

On 14/06/2019 14:38, Sriram Vatala via dev wrote:
> OVS may be unable to transmit packets for multiple reasons and
> today there is a single counter to track packets dropped due to
> any of those reasons. The most common reason is that a VM is
> unable to read packets fast enough causing the vhostuser port
> transmit queue on the OVS side to become full. This manifests
> as a problem with VNFs not receiving all packets. Having a
> separate drop counter to track packets dropped because the
> transmit queue is full will clearly indicate that the problem
> is on the VM side and not in OVS. Similarly maintaining separate
> counters for all possible drops helps in indicating sensible
> cause for packet drops.
> 
> This patch adds counters to track packets dropped due to all
> possible reasons and these counters are displayed along with
> other stats in "ovs-vsctl get interface <iface> statistics"
> command. The detailed stats will be available for both dpdk and
> vhostuser ports.
> 
> Following are the details of the new counters :
> 
> tx_qfull_drops : These are the packets dropped due to
> transmit queue overrun.
> 

I'm not sure about this name, you would need to know that it was the
only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
send pkts

> tx_mtu_exceeded_drops : These are the packets dropped due
> to MTU mismatch (i.e Pkt len > Max Dev MTU).
> 
> tx_qos_drops/rx_qos_drops : These are the packets dropped due
> to transmission/reception rate exceeding the configured
> Egress/Ingress policer rate on the interface.
> 

I think you also need to update vswitchd.xml with some docs for these stats

> Signed-off-by: Sriram Vatala <sriram.v@altencalsoftlabs.com>
> ---
>  include/openvswitch/netdev.h                       |  8 ++++
>  lib/netdev-dpdk.c                                  | 49 ++++++++++++++++++----
>  utilities/bugtool/automake.mk                      |  3 +-
>  utilities/bugtool/ovs-bugtool-get-iface-stats      | 25 +++++++++++
>  .../bugtool/plugins/network-status/openvswitch.xml |  3 ++
>  vswitchd/bridge.c                                  |  6 ++-
>  6 files changed, 85 insertions(+), 9 deletions(-)
>  create mode 100755 utilities/bugtool/ovs-bugtool-get-iface-stats
> 
> diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
> index 0c10f7b..69480a4 100644
> --- a/include/openvswitch/netdev.h
> +++ b/include/openvswitch/netdev.h
> @@ -61,6 +61,14 @@ struct netdev_stats {
>      uint64_t tx_heartbeat_errors;
>      uint64_t tx_window_errors;
>  
> +    /* Detailed receive drops. */
> +    uint64_t rx_qos_drops;
> +
> +    /* Detailed transmit drops. */
> +    uint64_t tx_qfull_drops;
> +    uint64_t tx_qos_drops;
> +    uint64_t tx_mtu_drops;
> +
>      /* Extended statistics based on RFC2819. */
>      uint64_t rx_1_to_64_packets;
>      uint64_t rx_65_to_127_packets;
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
> index 3498b32..6c2eb38 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -2124,6 +2124,7 @@ netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
>  
>      stats->rx_packets += count;
>      stats->rx_dropped += dropped;
> +    stats->rx_qos_drops += dropped;
>      for (i = 0; i < count; i++) {
>          packet = packets[i];
>          packet_size = dp_packet_size(packet);
> @@ -2236,6 +2237,7 @@ netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch,
>      if (OVS_UNLIKELY(dropped)) {
>          rte_spinlock_lock(&dev->stats_lock);
>          dev->stats.rx_dropped += dropped;
> +        dev->stats.rx_qos_drops += dropped;
>          rte_spinlock_unlock(&dev->stats_lock);
>      }
>  
> @@ -2319,6 +2321,9 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>      struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
>      unsigned int total_pkts = cnt;
>      unsigned int dropped = 0;
> +    unsigned int mtu_drops;
> +    unsigned int qos_drops;
> +    unsigned int qfull_drops;
>      int i, retries = 0;
>      int vid = netdev_dpdk_get_vid(dev);
>  
> @@ -2335,9 +2340,11 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,

There can be other drops earlier in this function, should they be logged
also?

>      rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
>  
>      cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
> +    mtu_drops = total_pkts - cnt;
> +    qos_drops = cnt;
>      /* Check has QoS has been configured for the netdev */
>      cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
> -    dropped = total_pkts - cnt;
> +    qos_drops -= cnt;
>  
>      do {
>          int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
> @@ -2357,9 +2364,14 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>  
>      rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
>  
> +    qfull_drops = cnt;
> +    dropped = mtu_drops + qos_drops + qfull_drops;
>      rte_spinlock_lock(&dev->stats_lock);
>      netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
> -                                         cnt + dropped);
> +                                         dropped);
> +    dev->stats.tx_mtu_drops += mtu_drops;
> +    dev->stats.tx_qos_drops += qos_drops;
> +    dev->stats.tx_qfull_drops += qfull_drops;

There is a function just above for updating vhost tx stats. Now the
updates are split with some updated in the function and some updated
here, it would be better to update them all in the same place.

>      rte_spinlock_unlock(&dev->stats_lock);
>  
>  out:
> @@ -2384,12 +2396,15 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
>      struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
>      uint32_t cnt = batch_cnt;
>      uint32_t dropped = 0;
> +    uint32_t mtu_drops = 0;
> +    uint32_t qos_drops = 0;
> +    uint32_t qfull_drops = 0;
>  
>      if (dev->type != DPDK_DEV_VHOST) {
>          /* Check if QoS has been configured for this netdev. */
>          cnt = netdev_dpdk_qos_run(dev, (struct rte_mbuf **) batch->packets,
>                                    batch_cnt, false);
> -        dropped += batch_cnt - cnt;
> +        qos_drops = batch_cnt - cnt;
>      }
>  
>      uint32_t txcnt = 0;
> @@ -2402,7 +2417,7 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
>              VLOG_WARN_RL(&rl, "Too big size %u max_packet_len %d",
>                           size, dev->max_packet_len);
>  
> -            dropped++;
> +            mtu_drops++;
>              continue;
>          }
>  
> @@ -2425,13 +2440,17 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
>              __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) pkts,
>                                       txcnt);
>          } else {
> -            dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, txcnt);
> +            qfull_drops = netdev_dpdk_eth_tx_burst(dev, qid, pkts, txcnt);
>          }
>      }
>  
> +    dropped += mtu_drops + qos_drops + qfull_drops;

= is enough, you don't need +=

>      if (OVS_UNLIKELY(dropped)) {
>          rte_spinlock_lock(&dev->stats_lock);
>          dev->stats.tx_dropped += dropped;
> +        dev->stats.tx_mtu_drops += mtu_drops;
> +        dev->stats.tx_qos_drops += qos_drops;
> +        dev->stats.tx_qfull_drops += qfull_drops;
>          rte_spinlock_unlock(&dev->stats_lock);
>      }
>  }
> @@ -2473,18 +2492,25 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
>          dp_packet_delete_batch(batch, true);
>      } else {
>          int tx_cnt, dropped;
> +        int mtu_drops, qfull_drops, qos_drops;
>          int batch_cnt = dp_packet_batch_size(batch);
>          struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;
>  
>          tx_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt);
> +        mtu_drops = batch_cnt - tx_cnt;
> +        qos_drops = tx_cnt;
>          tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
> -        dropped = batch_cnt - tx_cnt;
> +        qos_drops -= tx_cnt;
>  
> -        dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
> +        qfull_drops = netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
>  

Re comment on qfull stats, I'm not sure you can assume this is because
of qfull

> +        dropped = mtu_drops + qos_drops + qfull_drops;
>          if (OVS_UNLIKELY(dropped)) {
>              rte_spinlock_lock(&dev->stats_lock);
>              dev->stats.tx_dropped += dropped;
> +            dev->stats.tx_mtu_drops += mtu_drops;
> +            dev->stats.tx_qos_drops += qos_drops;
> +            dev->stats.tx_qfull_drops += qfull_drops;
>              rte_spinlock_unlock(&dev->stats_lock);
>          }
>      }
> @@ -2598,6 +2624,11 @@ netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
>      stats->rx_errors = dev->stats.rx_errors;
>      stats->rx_length_errors = dev->stats.rx_length_errors;
>  
> +    stats->tx_mtu_drops = dev->stats.tx_mtu_drops;
> +    stats->tx_qos_drops = dev->stats.tx_qos_drops;
> +    stats->tx_qfull_drops = dev->stats.tx_qfull_drops;
> +    stats->rx_qos_drops = dev->stats.rx_qos_drops;
> +
>      stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
>      stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
>      stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
> @@ -2733,6 +2764,10 @@ out:
>      rte_spinlock_lock(&dev->stats_lock);
>      stats->tx_dropped = dev->stats.tx_dropped;
>      stats->rx_dropped = dev->stats.rx_dropped;
> +    stats->tx_mtu_drops = dev->stats.tx_mtu_drops;
> +    stats->tx_qos_drops = dev->stats.tx_qos_drops;
> +    stats->tx_qfull_drops = dev->stats.tx_qfull_drops;
> +    stats->rx_qos_drops = dev->stats.rx_qos_drops;
>      rte_spinlock_unlock(&dev->stats_lock);
>  
>      /* These are the available DPDK counters for packets not received due to
> diff --git a/utilities/bugtool/automake.mk b/utilities/bugtool/automake.mk
> index 18fa347..9657468 100644
> --- a/utilities/bugtool/automake.mk
> +++ b/utilities/bugtool/automake.mk
> @@ -22,7 +22,8 @@ bugtool_scripts = \
>  	utilities/bugtool/ovs-bugtool-ovs-bridge-datapath-type \
>  	utilities/bugtool/ovs-bugtool-ovs-vswitchd-threads-affinity \
>  	utilities/bugtool/ovs-bugtool-qos-configs \
> -	utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa
> +	utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa \
> +	utilities/bugtool/ovs-bugtool-get-iface-stats
>  
>  scripts_SCRIPTS += $(bugtool_scripts)
>  
> diff --git a/utilities/bugtool/ovs-bugtool-get-iface-stats b/utilities/bugtool/ovs-bugtool-get-iface-stats
> new file mode 100755
> index 0000000..0fe175e
> --- /dev/null
> +++ b/utilities/bugtool/ovs-bugtool-get-iface-stats
> @@ -0,0 +1,25 @@
> +#! /bin/bash
> +
> +# This library is free software; you can redistribute it and/or
> +# modify it under the terms of version 2.1 of the GNU Lesser General
> +# Public License as published by the Free Software Foundation.
> +#
> +# This library is distributed in the hope that it will be useful,
> +# but WITHOUT ANY WARRANTY; without even the implied warranty of
> +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +# Lesser General Public License for more details.
> +#
> +# Copyright (C) 2019 Ericsson AB
> +
> +for bridge in `ovs-vsctl -- --real list-br`
> +do
> +    echo -e "\nBridge : ${bridge}\n"
> +    for iface in `ovs-vsctl list-ifaces ${bridge}`
> +    do
> +        echo -e "iface : ${iface}"
> +        ovs-vsctl get interface ${iface} statistics
> +        echo -e "\n"
> +    done
> +    echo -e "iface : ${bridge}"
> +    ovs-vsctl get interface ${bridge} statistics
> +done
> diff --git a/utilities/bugtool/plugins/network-status/openvswitch.xml b/utilities/bugtool/plugins/network-status/openvswitch.xml
> index d39867c..327c006 100644
> --- a/utilities/bugtool/plugins/network-status/openvswitch.xml
> +++ b/utilities/bugtool/plugins/network-status/openvswitch.xml
> @@ -34,6 +34,9 @@
>      <command label="ovs-appctl-dpctl-dump-flows-netdev" filters="ovs" repeat="2">ovs-appctl dpctl/dump-flows netdev@ovs-netdev</command>
>      <command label="ovs-appctl-dpctl-dump-flows-system" filters="ovs" repeat="2">ovs-appctl dpctl/dump-flows system@ovs-system</command>
>      <command label="ovs-appctl-dpctl-show-s" filters="ovs" repeat="2">ovs-appctl dpctl/show -s</command>
> +    <command label="ovs-vsctl-get-interface-statistics"
> +     filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-stats
> +    </command>

Probably better to be consistent and not wrap this line

>      <command label="ovs-ofctl-show" filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-bridges "show"</command>
>      <command label="ovs-ofctl-dump-flows" filters="ovs" repeat="2">/usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-bridges "dump-flows"</command>
>      <command label="ovs-ofctl-dump-ports" filters="ovs" repeat="2">/usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-bridges "dump-ports"</command>
> diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c
> index 0702cc6..2cb5558 100644
> --- a/vswitchd/bridge.c
> +++ b/vswitchd/bridge.c
> @@ -2407,7 +2407,11 @@ iface_refresh_stats(struct iface *iface)
>      IFACE_STAT(rx_undersized_errors,    "rx_undersized_errors")     \
>      IFACE_STAT(rx_oversize_errors,      "rx_oversize_errors")       \
>      IFACE_STAT(rx_fragmented_errors,    "rx_fragmented_errors")     \
> -    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")
> +    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")         \
> +    IFACE_STAT(rx_qos_drops,            "rx_qos_drops")             \
> +    IFACE_STAT(tx_qfull_drops,          "tx_qfull_drops")           \
> +    IFACE_STAT(tx_qos_drops,            "tx_qos_drops")             \
> +    IFACE_STAT(tx_mtu_drops,            "tx_mtu_exceeded_drops")
>  
>  #define IFACE_STAT(MEMBER, NAME) + 1
>      enum { N_IFACE_STATS = IFACE_STATS };
> 

What about updating show_dpif() to print with ovs-appctl dpctl/show -s ?
Kevin Traynor June 18, 2019, 7:38 p.m. UTC | #3
On 18/06/2019 15:05, Kevin Traynor wrote:
> Hi Sriram,
> 
> On 14/06/2019 14:38, Sriram Vatala via dev wrote:
>> OVS may be unable to transmit packets for multiple reasons and
>> today there is a single counter to track packets dropped due to
>> any of those reasons. The most common reason is that a VM is
>> unable to read packets fast enough causing the vhostuser port
>> transmit queue on the OVS side to become full. This manifests
>> as a problem with VNFs not receiving all packets. Having a
>> separate drop counter to track packets dropped because the
>> transmit queue is full will clearly indicate that the problem
>> is on the VM side and not in OVS. Similarly maintaining separate
>> counters for all possible drops helps in indicating sensible
>> cause for packet drops.
>>
>> This patch adds counters to track packets dropped due to all
>> possible reasons and these counters are displayed along with
>> other stats in "ovs-vsctl get interface <iface> statistics"
>> command. The detailed stats will be available for both dpdk and
>> vhostuser ports.
>>
>> Following are the details of the new counters :
>>
>> tx_qfull_drops : These are the packets dropped due to
>> transmit queue overrun.
>>
> 
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts
> 
>> tx_mtu_exceeded_drops : These are the packets dropped due
>> to MTU mismatch (i.e Pkt len > Max Dev MTU).
>>
>> tx_qos_drops/rx_qos_drops : These are the packets dropped due
>> to transmission/reception rate exceeding the configured
>> Egress/Ingress policer rate on the interface.
>>
> 
> I think you also need to update vswitchd.xml with some docs for these stats
> 

Actually, it doesn't seem needed there, perhaps something in the dpdk docs

<snip>

>> diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c
>> index 0702cc6..2cb5558 100644
>> --- a/vswitchd/bridge.c
>> +++ b/vswitchd/bridge.c
>> @@ -2407,7 +2407,11 @@ iface_refresh_stats(struct iface *iface)
>>      IFACE_STAT(rx_undersized_errors,    "rx_undersized_errors")     \
>>      IFACE_STAT(rx_oversize_errors,      "rx_oversize_errors")       \
>>      IFACE_STAT(rx_fragmented_errors,    "rx_fragmented_errors")     \
>> -    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")
>> +    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")         \
>> +    IFACE_STAT(rx_qos_drops,            "rx_qos_drops")             \
>> +    IFACE_STAT(tx_qfull_drops,          "tx_qfull_drops")           \
>> +    IFACE_STAT(tx_qos_drops,            "tx_qos_drops")             \
>> +    IFACE_STAT(tx_mtu_drops,            "tx_mtu_exceeded_drops")
>>  
>>  #define IFACE_STAT(MEMBER, NAME) + 1
>>      enum { N_IFACE_STATS = IFACE_STATS };
>>
> 
> What about updating show_dpif() to print with ovs-appctl dpctl/show -s ?
> 

scratch that comment, I since saw the discussion on v1

> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
>
Li,Rongqing via dev June 19, 2019, 10:07 a.m. UTC | #4
Hi Kevin,

Thanks for reviewing the patch. Please find my answers to your comments below.

Comment-1
I'm not sure about this name, you would need to know that it was the
only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
send pkts.
---
Agree that queue full is not the only reason for which the above DPDK
APIs will fail to send packets, but queue full is the most common reason.
Instead of overlooking other reasons like an invalid queue-id, I can change
the name of the counter to a more generic name, something like "tx_failed_drops".
What do you think?

Comment-2
There can be other drops earlier in this function 
("__netdev_dpdk_vhost_send"), should they be logged also?
---
These are the drops for an invalid queue-id or vhost device id, and for when
the device is not up.
Again, these are very unlikely to happen, but I can add a rate-limited warning
log.

Comment-3
>      rte_spinlock_lock(&dev->stats_lock);
>      netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
> +                                         dropped);
> +    dev->stats.tx_mtu_drops += mtu_drops;

There is a function just above for updating vhost tx stats. Now the
updates are split with some updated in the function and some updated
here, it would be better to update them all in the same place.
---
Agree. will change the implementation here.

Comment-4
>
> +    dropped += mtu_drops + qos_drops + qfull_drops;

= is enough, you don't need +=
---
Actually, in the code above this part in the "dpdk_do_tx_copy" function, the
dropped variable will be updated if the mbuf allocation fails.
Here is the code snippet:


        pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
        if (OVS_UNLIKELY(!pkts[txcnt])) {
            dropped += cnt - i;
            break;
        }

To take care not to miss this in the total drops, I am using dropped +=. Even
if the above part of the code is not hit, the expression will still yield the
correct value for drops, since the dropped variable is initialized to zero.


Comment-5

> +    <command label="ovs-vsctl-get-interface-statistics"
> + 
> filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-stats
> +    </command>

Probably better to be consistent and not wrap this line
---
OK. I wrapped this line because the utilities/checkpatch script gives a
warning when the line length exceeds 79 characters. I will unwrap it.



Comment-6

> I think you also need to update vswitchd.xml with some docs for these stats
>

Actually, it doesn't seem needed there, perhaps something in the dpdk docs

---
I am a bit unclear on where to document these new counters. As we have not
made any modifications to the DPDK APIs, can I document these new counters in
the man page of ovs-vsctl?
What do you think?

Thanks & Regards,
Sriram.

-----Original Message-----
From: Kevin Traynor <ktraynor@redhat.com>
Sent: 19 June 2019 01:09
To: Sriram Vatala <sriram.v@altencalsoftlabs.com>; ovs-dev@openvswitch.org
Cc: Ilya Maximets <i.maximets@samsung.com>
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and 
vhostuser ports

On 18/06/2019 15:05, Kevin Traynor wrote:
> Hi Sriram,
>
> On 14/06/2019 14:38, Sriram Vatala via dev wrote:
>> OVS may be unable to transmit packets for multiple reasons and today
>> there is a single counter to track packets dropped due to any of
>> those reasons. The most common reason is that a VM is unable to read
>> packets fast enough causing the vhostuser port transmit queue on the
>> OVS side to become full. This manifests as a problem with VNFs not
>> receiving all packets. Having a separate drop counter to track
>> packets dropped because the transmit queue is full will clearly
>> indicate that the problem is on the VM side and not in OVS. Similarly
>> maintaining separate counters for all possible drops helps in
>> indicating sensible cause for packet drops.
>>
>> This patch adds counters to track packets dropped due to all possible
>> reasons and these counters are displayed along with other stats in
>> "ovs-vsctl get interface <iface> statistics"
>> command. The detailed stats will be available for both dpdk and
>> vhostuser ports.
>>
>> Following are the details of the new counters :
>>
>> tx_qfull_drops : These are the packets dropped due to transmit queue
>> overrun.
>>
>
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts
>
>> tx_mtu_exceeded_drops : These are the packets dropped due to MTU
>> mismatch (i.e Pkt len > Max Dev MTU).
>>
>> tx_qos_drops/rx_qos_drops : These are the packets dropped due to
>> transmission/reception rate exceeding the configured Egress/Ingress
>> policer rate on the interface.
>>
>
> I think you also need to update vswitchd.xml with some docs for these
> stats
>

Actually, it doesn't seem needed there, perhaps something in the dpdk docs

<snip>

>> diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c index
>> 0702cc6..2cb5558 100644
>> --- a/vswitchd/bridge.c
>> +++ b/vswitchd/bridge.c
>> @@ -2407,7 +2407,11 @@ iface_refresh_stats(struct iface *iface)
>>      IFACE_STAT(rx_undersized_errors,    "rx_undersized_errors")     \
>>      IFACE_STAT(rx_oversize_errors,      "rx_oversize_errors")       \
>>      IFACE_STAT(rx_fragmented_errors,    "rx_fragmented_errors")     \
>> -    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")
>> +    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")         \
>> +    IFACE_STAT(rx_qos_drops,            "rx_qos_drops")             \
>> +    IFACE_STAT(tx_qfull_drops,          "tx_qfull_drops")           \
>> +    IFACE_STAT(tx_qos_drops,            "tx_qos_drops")             \
>> +    IFACE_STAT(tx_mtu_drops,            "tx_mtu_exceeded_drops")
>>
>>  #define IFACE_STAT(MEMBER, NAME) + 1
>>      enum { N_IFACE_STATS = IFACE_STATS };
>>
>
> What about updating show_dpif() to print with ovs-appctl dpctl/show -s ?
>

scratch that comment, I since saw the discussion on v1

> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
>
Li,Rongqing via dev June 19, 2019, 10:48 a.m. UTC | #5
Hi Kevin,

Thanks for reviewing the patch. Please find my answers to your comments below.

Comment-1
I'm not sure about this name, you would need to know that it was the
only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
send pkts.
---
Agree that queue full is not the only reason for which the above DPDK
APIs will fail to send packets, but queue full is the most common reason.
Instead of overlooking other reasons like an invalid queue-id, I can change
the name of the counter to a more generic name, something like "tx_failed".
What do you think?

Comment-2
There can be other drops earlier in this function
("__netdev_dpdk_vhost_send"), should they be logged also?
---
These are the drops for invalid queue-id or vhost device id and if device is
not up.
Again these are very unlikely to happen, but i can add a rate limiting warn
log.

Comment-3
>      rte_spinlock_lock(&dev->stats_lock);
>      netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
> +                                         dropped);
> +    dev->stats.tx_mtu_drops += mtu_drops;

There is a function just above for updating vhost tx stats. Now the
updates are split with some updated in the function and some updated
here, it would be better to update them all in the same place.
---
Agree. will change the implementation here.

Comment-4
>
> +    dropped += mtu_drops + qos_drops + qfull_drops;

= is enough, you don't need +=
---
Actually, in the code above this part in the "dpdk_do_tx_copy" function, the
dropped variable will be updated if the mbuf allocation fails.
Here is the code snippet:


        pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
        if (OVS_UNLIKELY(!pkts[txcnt])) {
            dropped += cnt - i;
            break;
        }

To take care not to miss this in the total drops, I am using dropped +=. Even
if the above part of the code is not hit, the expression will still yield the
correct value for drops, since the dropped variable is initialized to zero.


Comment-5

> +    <command label="ovs-vsctl-get-interface-statistics"
> +
> filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-stats
> +    </command>

Probably better to be consistent and not wrap this line
---
Ok. I wrapped this line as utilities/checkpatch script is giving warning as
the line length is exceeding 79 characters. I will unwrap this.



Comment-6

> I think you also need to update vswitchd.xml with some docs for these stats
>

Actually, it doesn't seem needed there, perhaps something in the dpdk docs

---
I am a bit unclear on where to document these new counters. As we have not
made any modifications to the DPDK APIs, can I document these new counters in
the man page of ovs-vsctl?
What do you think?

Thanks & Regards,
Sriram.

-----Original Message-----
From: Kevin Traynor <ktraynor@redhat.com>
Sent: 19 June 2019 01:09
To: Sriram Vatala <sriram.v@altencalsoftlabs.com>; ovs-dev@openvswitch.org
Cc: Ilya Maximets <i.maximets@samsung.com>
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and
vhostuser ports

On 18/06/2019 15:05, Kevin Traynor wrote:
> Hi Sriram,
>
> On 14/06/2019 14:38, Sriram Vatala via dev wrote:
>> OVS may be unable to transmit packets for multiple reasons and today
>> there is a single counter to track packets dropped due to any of
>> those reasons. The most common reason is that a VM is unable to read
>> packets fast enough causing the vhostuser port transmit queue on the
>> OVS side to become full. This manifests as a problem with VNFs not
>> receiving all packets. Having a separate drop counter to track
>> packets dropped because the transmit queue is full will clearly
>> indicate that the problem is on the VM side and not in OVS. Similarly
>> maintaining separate counters for all possible drops helps in
>> indicating sensible cause for packet drops.
>>
>> This patch adds counters to track packets dropped due to all possible
>> reasons and these counters are displayed along with other stats in
>> "ovs-vsctl get interface <iface> statistics"
>> command. The detailed stats will be available for both dpdk and
>> vhostuser ports.
>>
>> Following are the details of the new counters :
>>
>> tx_qfull_drops : These are the packets dropped due to transmit queue
>> overrun.
>>
>
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts
>
>> tx_mtu_exceeded_drops : These are the packets dropped due to MTU
>> mismatch (i.e Pkt len > Max Dev MTU).
>>
>> tx_qos_drops/rx_qos_drops : These are the packets dropped due to
>> transmission/reception rate exceeding the configured Egress/Ingress
>> policer rate on the interface.
>>
>
> I think you also need to update vswitchd.xml with some docs for these
> stats
>

Actually, it doesn't seem needed there, perhaps something in the dpdk docs

<snip>

>> diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c index
>> 0702cc6..2cb5558 100644
>> --- a/vswitchd/bridge.c
>> +++ b/vswitchd/bridge.c
>> @@ -2407,7 +2407,11 @@ iface_refresh_stats(struct iface *iface)
>>      IFACE_STAT(rx_undersized_errors,    "rx_undersized_errors")     \
>>      IFACE_STAT(rx_oversize_errors,      "rx_oversize_errors")       \
>>      IFACE_STAT(rx_fragmented_errors,    "rx_fragmented_errors")     \
>> -    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")
>> +    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")         \
>> +    IFACE_STAT(rx_qos_drops,            "rx_qos_drops")             \
>> +    IFACE_STAT(tx_qfull_drops,          "tx_qfull_drops")           \
>> +    IFACE_STAT(tx_qos_drops,            "tx_qos_drops")             \
>> +    IFACE_STAT(tx_mtu_drops,            "tx_mtu_exceeded_drops")
>>
>>  #define IFACE_STAT(MEMBER, NAME) + 1
>>      enum { N_IFACE_STATS = IFACE_STATS };
>>
>
> What about updating show_dpif() to print with ovs-appctl dpctl/show -s ?
>

scratch that comment, I since saw the discussion on v1

> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
>
Kevin Traynor June 19, 2019, 1:36 p.m. UTC | #6
On 19/06/2019 11:07, Sriram Vatala wrote:
> 
> Hi Kevin,
> 
> Thanks for reviewing the patch. Please find my answers to your comments below.
> 
> Comment-1
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts.
> ---
> Agree that queue full is not the only reason for which the above DPDK
> apis will fail to send packets. But queue full is the most common reason.
> Instead of overlooking other reasons like invalid queue-id, i can change
> the name of the counter to a generic name something like "tx_failed_drops".
> what do you think?
> 

Sounds good to me.

> Comment-2
> There can be other drops earlier in this function 
> ("__netdev_dpdk_vhost_send"), should they be logged also?
> ---
> These are the drops for invalid queue-id or vhost device id and if device is 
> not up.
> Again these are very unlikely to happen, but i can add a rate limiting warn 
> log.
> 

Well I guess it doesn't invalidate your patches which provides drop
stats for mtu/qos/qfull(or new name), so maybe it's ok to not log those
other drops in the context of this patchset.

> Comment-3
>>      rte_spinlock_lock(&dev->stats_lock);
>>      netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
>> +                                         dropped);
>> +    dev->stats.tx_mtu_drops += mtu_drops;
> 
> There is a function just above for updating vhost tx stats. Now the
> updates are split with some updated in the function and some updated
> here, it would be better to update them all in the same place.
> ---
> Agree. will change the implementation here.
> 
> Comment-4
>>
>> +    dropped += mtu_drops + qos_drops + qfull_drops;
> 
> = is enough, you don't need +=
> ---
> Actually in the code above to this part in "dpdk_do_tx_copy" function, dropped 
> will updated if mbuf alloc fails.
> Here is the code snippet:
> 
> 
>         pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
>         if (OVS_UNLIKELY(!pkts[txcnt])) {
>             dropped += cnt - i;
>             break;
>         }
> 
> To take care not to miss this in total drops, i am using dropped+=, Even if 
> the above part of the code doesn't hit, as dropped variable is initialized to 
> Zero
> that expression should not result in incorrect value for drops.
> 

Yes, you're right, I missed it, thanks.

> 
> Comment-5
> 
>> +    <command label="ovs-vsctl-get-interface-statistics"
>> + 
>> filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-stats
>> +    </command>
> 
> Probably better to be consistent and not wrap this line
> ---
> Ok. I wrapped this line as utilities/checkpatch script is giving warning as 
> the line length is exceeding 79 characters. I will unwrap this.
> 
> 
> 
> Comment-6
> 
>> I think you also need to update vswitchd.xml with some docs for these stats
>>
> 
> Actually, it doesn't seem needed there, perhaps something in the dpdk docs
> 
> ---
> Bit unclear on where to document this new counters. As we have not done any 
> modifications to dpdk APIs, can i document these new counters in man page of 
> ovs-vsctl.
> what do you think?
> 

I'm not exactly sure where either; I was thinking somewhere in one of
the .rst files in the /Documentation directory. Maybe it's not strictly
necessary for all stats, but since there are small explanations in the
commit message for these ones, it seems like they might be helpful in the docs.

thanks,
Kevin.

> Thanks & Regards,
> Sriram.
> 
> -----Original Message-----
> From: Kevin Traynor <ktraynor@redhat.com>
> Sent: 19 June 2019 01:09
> To: Sriram Vatala <sriram.v@altencalsoftlabs.com>; ovs-dev@openvswitch.org
> Cc: Ilya Maximets <i.maximets@samsung.com>
> Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and 
> vhostuser ports
> 
> On 18/06/2019 15:05, Kevin Traynor wrote:
>> Hi Sriram,
>>
>> On 14/06/2019 14:38, Sriram Vatala via dev wrote:
>>> OVS may be unable to transmit packets for multiple reasons and today
>>> there is a single counter to track packets dropped due to any of
>>> those reasons. The most common reason is that a VM is unable to read
>>> packets fast enough causing the vhostuser port transmit queue on the
>>> OVS side to become full. This manifests as a problem with VNFs not
>>> receiving all packets. Having a separate drop counter to track
>>> packets dropped because the transmit queue is full will clearly
>>> indicate that the problem is on the VM side and not in OVS. Similarly
>>> maintaining separate counters for all possible drops helps in
>>> indicating sensible cause for packet drops.
>>>
>>> This patch adds counters to track packets dropped due to all possible
>>> reasons and these counters are displayed along with other stats in
>>> "ovs-vsctl get interface <iface> statistics"
>>> command. The detailed stats will be available for both dpdk and
>>> vhostuser ports.
>>>
>>> Following are the details of the new counters :
>>>
>>> tx_qfull_drops : These are the packets dropped due to transmit queue
>>> overrun.
>>>
>>
>> I'm not sure about this name, you would need to know that it was the
>> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
>> send pkts
>>
>>> tx_mtu_exceeded_drops : These are the packets dropped due to MTU
>>> mismatch (i.e Pkt len > Max Dev MTU).
>>>
>>> tx_qos_drops/rx_qos_drops : These are the packets dropped due to
>>> transmission/reception rate exceeding the configured Egress/Ingress
>>> policer rate on the interface.
>>>
>>
>> I think you also need to update vswitchd.xml with some docs for these
>> stats
>>
> 
> Actually, it doesn't seem needed there, perhaps something in the dpdk docs
> 
> <snip>
> 
>>> diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c index
>>> 0702cc6..2cb5558 100644
>>> --- a/vswitchd/bridge.c
>>> +++ b/vswitchd/bridge.c
>>> @@ -2407,7 +2407,11 @@ iface_refresh_stats(struct iface *iface)
>>>      IFACE_STAT(rx_undersized_errors,    "rx_undersized_errors")     \
>>>      IFACE_STAT(rx_oversize_errors,      "rx_oversize_errors")       \
>>>      IFACE_STAT(rx_fragmented_errors,    "rx_fragmented_errors")     \
>>> -    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")
>>> +    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")         \
>>> +    IFACE_STAT(rx_qos_drops,            "rx_qos_drops")             \
>>> +    IFACE_STAT(tx_qfull_drops,          "tx_qfull_drops")           \
>>> +    IFACE_STAT(tx_qos_drops,            "tx_qos_drops")             \
>>> +    IFACE_STAT(tx_mtu_drops,            "tx_mtu_exceeded_drops")
>>>
>>>  #define IFACE_STAT(MEMBER, NAME) + 1
>>>      enum { N_IFACE_STATS = IFACE_STATS };
>>>
>>
>> What about updating show_dpif() to print with ovs-appctl dpctl/show -s ?
>>
> 
> scratch that comment, I since saw the discussion on v1
> 
>> _______________________________________________
>> dev mailing list
>> dev@openvswitch.org
>> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
>>
Li,Rongqing via dev June 19, 2019, 3:13 p.m. UTC | #7
Hi kevin,

Thanks for your inputs. I will  send the updated patch ASAP.

Thanks & Regards,
Sriram.

-----Original Message-----
From: Kevin Traynor <ktraynor@redhat.com>
Sent: 19 June 2019 19:07
To: Sriram Vatala <sriram.v@altencalsoftlabs.com>; ovs-dev@openvswitch.org
Cc: 'Ilya Maximets' <i.maximets@samsung.com>
Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per dpdk and 
vhostuser ports

On 19/06/2019 11:07, Sriram Vatala wrote:
>
> Hi Kevin,
>
> Thanks for reviewing the patch. Please find my answers to your comments 
> below.
>
> Comment-1
> I'm not sure about this name, you would need to know that it was the
> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
> send pkts.
> ---
> Agree that queue full is not the only reason for which the above DPDK
> apis will fail to send packets. But queue full is the most common reason.
> Instead of overlooking other reasons like invalid queue-id, i can
> change the name of the counter to a generic name something like 
> "tx_failed_drops".
> what do you think?
>

Sounds good to me.

> Comment-2
> There can be other drops earlier in this function
> ("__netdev_dpdk_vhost_send"), should they be logged also?
> ---
> These are the drops for invalid queue-id or vhost device id and if
> device is not up.
> Again these are very unlikely to happen, but i can add a rate limiting
> warn log.
>

Well I guess it doesn't invalidate your patches which provides drop stats for 
mtu/qos/qfull(or new name), so maybe it's ok to not log those other drops in 
the context of this patchset.

> Comment-3
>>      rte_spinlock_lock(&dev->stats_lock);
>>      netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts,
>> total_pkts,
>> +                                         dropped);
>> +    dev->stats.tx_mtu_drops += mtu_drops;
>
> There is a function just above for updating vhost tx stats. Now the
> updates are split with some updated in the function and some updated
> here, it would be better to update them all in the same place.
> ---
> Agree. will change the implementation here.
>
> Comment-4
>>
>> +    dropped += mtu_drops + qos_drops + qfull_drops;
>
> = is enough, you don't need +=
> ---
> Actually in the code above to this part in "dpdk_do_tx_copy" function,
> dropped will updated if mbuf alloc fails.
> Here is the code snippet:
>
>
>         pkts[txcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
>         if (OVS_UNLIKELY(!pkts[txcnt])) {
>             dropped += cnt - i;
>             break;
>         }
>
> To take care not to miss this in total drops, i am using dropped+=,
> Even if the above part of the code doesn't hit, as dropped variable is
> initialized to Zero that expression should not result in incorrect
> value for drops.
>

Yes, you're right, I missed it, thanks.

>
> Comment-5
>
>> +    <command label="ovs-vsctl-get-interface-statistics"
>> +
>> filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-st
>> ats
>> +    </command>
>
> Probably better to be consistent and not wrap this line
> ---
> Ok. I wrapped this line as utilities/checkpatch script is giving
> warning as the line length is exceeding 79 characters. I will unwrap this.
>
>
>
> Comment-6
>
>> I think you also need to update vswitchd.xml with some docs for these
>> stats
>>
>
> Actually, it doesn't seem needed there, perhaps something in the dpdk
> docs
>
> ---
> Bit unclear on where to document this new counters. As we have not
> done any modifications to dpdk APIs, can i document these new counters
> in man page of ovs-vsctl.
> what do you think?
>

I'm not exactly sure where either; I was thinking somewhere in one of the
.rst files in the /Documentation directory. Maybe it's not strictly necessary for
all stats, but since there are small explanations in the commit message for these
ones, it seems like they might be helpful in the docs.

thanks,
Kevin.

> Thanks & Regards,
> Sriram.
>
> -----Original Message-----
> From: Kevin Traynor <ktraynor@redhat.com>
> Sent: 19 June 2019 01:09
> To: Sriram Vatala <sriram.v@altencalsoftlabs.com>;
> ovs-dev@openvswitch.org
> Cc: Ilya Maximets <i.maximets@samsung.com>
> Subject: Re: [ovs-dev] [PATCH v2] Detailed packet drop statistics per
> dpdk and vhostuser ports
>
> On 18/06/2019 15:05, Kevin Traynor wrote:
>> Hi Sriram,
>>
>> On 14/06/2019 14:38, Sriram Vatala via dev wrote:
>>> OVS may be unable to transmit packets for multiple reasons and today
>>> there is a single counter to track packets dropped due to any of
>>> those reasons. The most common reason is that a VM is unable to read
>>> packets fast enough causing the vhostuser port transmit queue on the
>>> OVS side to become full. This manifests as a problem with VNFs not
>>> receiving all packets. Having a separate drop counter to track
>>> packets dropped because the transmit queue is full will clearly
>>> indicate that the problem is on the VM side and not in OVS.
>>> Similarly maintaining separate counters for all possible drops helps
>>> in indicating sensible cause for packet drops.
>>>
>>> This patch adds counters to track packets dropped due to all
>>> possible reasons and these counters are displayed along with other
>>> stats in "ovs-vsctl get interface <iface> statistics"
>>> command. The detailed stats will be available for both dpdk and
>>> vhostuser ports.
>>>
>>> Following are the details of the new counters :
>>>
>>> tx_qfull_drops : These are the packets dropped due to transmit queue
>>> overrun.
>>>
>>
>> I'm not sure about this name, you would need to know that it was the
>> only reason rte_eth_tx_burst() and rte_vhost_enqueue_burst() will not
>> send pkts
>>
>>> tx_mtu_exceeded_drops : These are the packets dropped due to MTU
>>> mismatch (i.e Pkt len > Max Dev MTU).
>>>
>>> tx_qos_drops/rx_qos_drops : These are the packets dropped due to
>>> transmission/reception rate exceeding the configured Egress/Ingress
>>> policer rate on the interface.
>>>
>>
>> I think you also need to update vswitchd.xml with some docs for these
>> stats
>>
>
> Actually, it doesn't seem needed there, perhaps something in the dpdk
> docs
>
> <snip>
>
>>> diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c index
>>> 0702cc6..2cb5558 100644
>>> --- a/vswitchd/bridge.c
>>> +++ b/vswitchd/bridge.c
>>> @@ -2407,7 +2407,11 @@ iface_refresh_stats(struct iface *iface)
>>>      IFACE_STAT(rx_undersized_errors,    "rx_undersized_errors")     \
>>>      IFACE_STAT(rx_oversize_errors,      "rx_oversize_errors")       \
>>>      IFACE_STAT(rx_fragmented_errors,    "rx_fragmented_errors")     \
>>> -    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")
>>> +    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")         \
>>> +    IFACE_STAT(rx_qos_drops,            "rx_qos_drops")             \
>>> +    IFACE_STAT(tx_qfull_drops,          "tx_qfull_drops")           \
>>> +    IFACE_STAT(tx_qos_drops,            "tx_qos_drops")             \
>>> +    IFACE_STAT(tx_mtu_drops,            "tx_mtu_exceeded_drops")
>>>
>>>  #define IFACE_STAT(MEMBER, NAME) + 1
>>>      enum { N_IFACE_STATS = IFACE_STATS };
>>>
>>
>> What about updating show_dpif() to print with ovs-appctl dpctl/show -s ?
>>
>
> scratch that comment, I since saw the discussion on v1
>
>> _______________________________________________
>> dev mailing list
>> dev@openvswitch.org
>> https://mail.openvswitch.org/mailman/listinfo/ovs-dev
>>
diff mbox series

Patch

diff --git a/include/openvswitch/netdev.h b/include/openvswitch/netdev.h
index 0c10f7b..69480a4 100644
--- a/include/openvswitch/netdev.h
+++ b/include/openvswitch/netdev.h
@@ -61,6 +61,14 @@  struct netdev_stats {
     uint64_t tx_heartbeat_errors;
     uint64_t tx_window_errors;
 
+    /* Detailed receive drops. */
+    uint64_t rx_qos_drops;
+
+    /* Detailed transmit drops. */
+    uint64_t tx_qfull_drops;
+    uint64_t tx_qos_drops;
+    uint64_t tx_mtu_drops;
+
     /* Extended statistics based on RFC2819. */
     uint64_t rx_1_to_64_packets;
     uint64_t rx_65_to_127_packets;
diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 3498b32..6c2eb38 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -2124,6 +2124,7 @@  netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
 
     stats->rx_packets += count;
     stats->rx_dropped += dropped;
+    stats->rx_qos_drops += dropped;
     for (i = 0; i < count; i++) {
         packet = packets[i];
         packet_size = dp_packet_size(packet);
@@ -2236,6 +2237,7 @@  netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch,
     if (OVS_UNLIKELY(dropped)) {
         rte_spinlock_lock(&dev->stats_lock);
         dev->stats.rx_dropped += dropped;
+        dev->stats.rx_qos_drops += dropped;
         rte_spinlock_unlock(&dev->stats_lock);
     }
 
@@ -2319,6 +2321,9 @@  __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
     struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
     unsigned int total_pkts = cnt;
     unsigned int dropped = 0;
+    unsigned int mtu_drops;
+    unsigned int qos_drops;
+    unsigned int qfull_drops;
     int i, retries = 0;
     int vid = netdev_dpdk_get_vid(dev);
 
@@ -2335,9 +2340,11 @@  __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
     rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
 
     cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
+    mtu_drops = total_pkts - cnt;
+    qos_drops = cnt;
     /* Check has QoS has been configured for the netdev */
     cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
-    dropped = total_pkts - cnt;
+    qos_drops -= cnt;
 
     do {
         int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
@@ -2357,9 +2364,14 @@  __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
 
     rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
 
+    qfull_drops = cnt;
+    dropped = mtu_drops + qos_drops + qfull_drops;
     rte_spinlock_lock(&dev->stats_lock);
     netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
-                                         cnt + dropped);
+                                         dropped);
+    dev->stats.tx_mtu_drops += mtu_drops;
+    dev->stats.tx_qos_drops += qos_drops;
+    dev->stats.tx_qfull_drops += qfull_drops;
     rte_spinlock_unlock(&dev->stats_lock);
 
 out:
@@ -2384,12 +2396,15 @@  dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
     struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
     uint32_t cnt = batch_cnt;
     uint32_t dropped = 0;
+    uint32_t mtu_drops = 0;
+    uint32_t qos_drops = 0;
+    uint32_t qfull_drops = 0;
 
     if (dev->type != DPDK_DEV_VHOST) {
         /* Check if QoS has been configured for this netdev. */
         cnt = netdev_dpdk_qos_run(dev, (struct rte_mbuf **) batch->packets,
                                   batch_cnt, false);
-        dropped += batch_cnt - cnt;
+        qos_drops = batch_cnt - cnt;
     }
 
     uint32_t txcnt = 0;
@@ -2402,7 +2417,7 @@  dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
             VLOG_WARN_RL(&rl, "Too big size %u max_packet_len %d",
                          size, dev->max_packet_len);
 
-            dropped++;
+            mtu_drops++;
             continue;
         }
 
@@ -2425,13 +2440,17 @@  dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
             __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) pkts,
                                      txcnt);
         } else {
-            dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, txcnt);
+            qfull_drops = netdev_dpdk_eth_tx_burst(dev, qid, pkts, txcnt);
         }
     }
 
+    dropped += mtu_drops + qos_drops + qfull_drops;
     if (OVS_UNLIKELY(dropped)) {
         rte_spinlock_lock(&dev->stats_lock);
         dev->stats.tx_dropped += dropped;
+        dev->stats.tx_mtu_drops += mtu_drops;
+        dev->stats.tx_qos_drops += qos_drops;
+        dev->stats.tx_qfull_drops += qfull_drops;
         rte_spinlock_unlock(&dev->stats_lock);
     }
 }
@@ -2473,18 +2492,25 @@  netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
         dp_packet_delete_batch(batch, true);
     } else {
         int tx_cnt, dropped;
+        int mtu_drops, qfull_drops, qos_drops;
         int batch_cnt = dp_packet_batch_size(batch);
         struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;
 
         tx_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt);
+        mtu_drops = batch_cnt - tx_cnt;
+        qos_drops = tx_cnt;
         tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
-        dropped = batch_cnt - tx_cnt;
+        qos_drops -= tx_cnt;
 
-        dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
+        qfull_drops = netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
 
+        dropped = mtu_drops + qos_drops + qfull_drops;
         if (OVS_UNLIKELY(dropped)) {
             rte_spinlock_lock(&dev->stats_lock);
             dev->stats.tx_dropped += dropped;
+            dev->stats.tx_mtu_drops += mtu_drops;
+            dev->stats.tx_qos_drops += qos_drops;
+            dev->stats.tx_qfull_drops += qfull_drops;
             rte_spinlock_unlock(&dev->stats_lock);
         }
     }
@@ -2598,6 +2624,11 @@  netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
     stats->rx_errors = dev->stats.rx_errors;
     stats->rx_length_errors = dev->stats.rx_length_errors;
 
+    stats->tx_mtu_drops = dev->stats.tx_mtu_drops;
+    stats->tx_qos_drops = dev->stats.tx_qos_drops;
+    stats->tx_qfull_drops = dev->stats.tx_qfull_drops;
+    stats->rx_qos_drops = dev->stats.rx_qos_drops;
+
     stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
     stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
     stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
@@ -2733,6 +2764,10 @@  out:
     rte_spinlock_lock(&dev->stats_lock);
     stats->tx_dropped = dev->stats.tx_dropped;
     stats->rx_dropped = dev->stats.rx_dropped;
+    stats->tx_mtu_drops = dev->stats.tx_mtu_drops;
+    stats->tx_qos_drops = dev->stats.tx_qos_drops;
+    stats->tx_qfull_drops = dev->stats.tx_qfull_drops;
+    stats->rx_qos_drops = dev->stats.rx_qos_drops;
     rte_spinlock_unlock(&dev->stats_lock);
 
     /* These are the available DPDK counters for packets not received due to
diff --git a/utilities/bugtool/automake.mk b/utilities/bugtool/automake.mk
index 18fa347..9657468 100644
--- a/utilities/bugtool/automake.mk
+++ b/utilities/bugtool/automake.mk
@@ -22,7 +22,8 @@  bugtool_scripts = \
 	utilities/bugtool/ovs-bugtool-ovs-bridge-datapath-type \
 	utilities/bugtool/ovs-bugtool-ovs-vswitchd-threads-affinity \
 	utilities/bugtool/ovs-bugtool-qos-configs \
-	utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa
+	utilities/bugtool/ovs-bugtool-get-dpdk-nic-numa \
+	utilities/bugtool/ovs-bugtool-get-iface-stats
 
 scripts_SCRIPTS += $(bugtool_scripts)
 
diff --git a/utilities/bugtool/ovs-bugtool-get-iface-stats b/utilities/bugtool/ovs-bugtool-get-iface-stats
new file mode 100755
index 0000000..0fe175e
--- /dev/null
+++ b/utilities/bugtool/ovs-bugtool-get-iface-stats
@@ -0,0 +1,25 @@ 
+#! /bin/bash
+
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General
+# Public License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# Copyright (C) 2019 Ericsson AB
+
+for bridge in $(ovs-vsctl -- --real list-br)
+do
+    echo -e "\nBridge : ${bridge}\n"
+    for iface in $(ovs-vsctl list-ifaces "${bridge}")
+    do
+        echo -e "iface : ${iface}"
+        ovs-vsctl get interface "${iface}" statistics
+        echo -e "\n"
+    done
+    echo -e "iface : ${bridge}"
+    ovs-vsctl get interface "${bridge}" statistics
+done
diff --git a/utilities/bugtool/plugins/network-status/openvswitch.xml b/utilities/bugtool/plugins/network-status/openvswitch.xml
index d39867c..327c006 100644
--- a/utilities/bugtool/plugins/network-status/openvswitch.xml
+++ b/utilities/bugtool/plugins/network-status/openvswitch.xml
@@ -34,6 +34,9 @@ 
     <command label="ovs-appctl-dpctl-dump-flows-netdev" filters="ovs" repeat="2">ovs-appctl dpctl/dump-flows netdev@ovs-netdev</command>
     <command label="ovs-appctl-dpctl-dump-flows-system" filters="ovs" repeat="2">ovs-appctl dpctl/dump-flows system@ovs-system</command>
     <command label="ovs-appctl-dpctl-show-s" filters="ovs" repeat="2">ovs-appctl dpctl/show -s</command>
+    <command label="ovs-vsctl-get-interface-statistics"
+     filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-get-iface-stats
+    </command>
     <command label="ovs-ofctl-show" filters="ovs">/usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-bridges "show"</command>
     <command label="ovs-ofctl-dump-flows" filters="ovs" repeat="2">/usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-bridges "dump-flows"</command>
     <command label="ovs-ofctl-dump-ports" filters="ovs" repeat="2">/usr/share/openvswitch/scripts/ovs-bugtool-ovs-ofctl-loop-over-bridges "dump-ports"</command>
diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c
index 0702cc6..2cb5558 100644
--- a/vswitchd/bridge.c
+++ b/vswitchd/bridge.c
@@ -2407,7 +2407,11 @@  iface_refresh_stats(struct iface *iface)
     IFACE_STAT(rx_undersized_errors,    "rx_undersized_errors")     \
     IFACE_STAT(rx_oversize_errors,      "rx_oversize_errors")       \
     IFACE_STAT(rx_fragmented_errors,    "rx_fragmented_errors")     \
-    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")
+    IFACE_STAT(rx_jabber_errors,        "rx_jabber_errors")         \
+    IFACE_STAT(rx_qos_drops,            "rx_qos_drops")             \
+    IFACE_STAT(tx_qfull_drops,          "tx_qfull_drops")           \
+    IFACE_STAT(tx_qos_drops,            "tx_qos_drops")             \
+    IFACE_STAT(tx_mtu_drops,            "tx_mtu_exceeded_drops")
 
 #define IFACE_STAT(MEMBER, NAME) + 1
     enum { N_IFACE_STATS = IFACE_STATS };