[ovs-dev,v3] netdev-dpdk: In some cases, no need to reconfigure PMD threads when changing MTU

Message ID 20161011184659.10059-1-xu.binbin1@zte.com.cn
State Rejected
Delegated to: Daniele Di Proietto
Headers show

Commit Message

Xu Binbin Oct. 11, 2016, 6:46 p.m. UTC
If the port is not an Ethernet port and the aligned buffer size for the new
MTU doesn't change, we don't need to reconfigure the PMD threads. All we
need to do is update 'max_packet_len' atomically.

Signed-off-by: Binbin Xu <xu.binbin1@zte.com.cn>
---
 lib/netdev-dpdk.c | 38 ++++++++++++++++++++++++++++----------
 1 file changed, 28 insertions(+), 10 deletions(-)
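
The locking scheme this relies on: netdev_dpdk_set_mtu() publishes the new
'max_packet_len' with atomic_store_relaxed() while holding 'dev->mutex', and
the lockless datapath readers pick it up with atomic_read_relaxed(). OVS's
relaxed accessors map onto C11 relaxed atomics; here is a minimal standalone
equivalent of the pattern using <stdatomic.h> (the helper names
'frame_len_set' and 'frame_len_get' are illustrative, not from the patch):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for the patch's 'atomic_int max_packet_len' field. */
    static atomic_int max_packet_len;

    /* Writer side: called with the device mutex held, as in
     * netdev_dpdk_set_mtu(). */
    static void
    frame_len_set(int frame_len)
    {
        atomic_store_explicit(&max_packet_len, frame_len,
                              memory_order_relaxed);
    }

    /* Reader side: safe to call from datapath threads without a lock;
     * a reader may briefly see the old value, which is harmless here. */
    static int
    frame_len_get(void)
    {
        return atomic_load_explicit(&max_packet_len, memory_order_relaxed);
    }

    int
    main(void)
    {
        frame_len_set(1500 + 18);   /* MTU + Ethernet header + CRC. */
        printf("max_packet_len = %d\n", frame_len_get());
        return 0;
    }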

Comments

Daniele Di Proietto Nov. 16, 2016, 12:55 a.m. UTC | #1
2016-10-11 11:46 GMT-07:00 Binbin Xu <xu.binbin1@zte.com.cn>:

> If the port is not an Ethernet port and the aligned buffer size for the new
> MTU doesn't change, we don't need to reconfigure the PMD threads. All we
> need to do is update 'max_packet_len' atomically.
>

Thanks for the patch. I realized that this will not be possible with the
vhost PMD proposed here:

https://mail.openvswitch.org/pipermail/ovs-dev/2016-October/243394.html

I'm afraid we have to stick to a full reconfiguration for every MTU change.
Even though reconfiguration on current master and branch-2.6 is slow (it
brings down all the threads), I think we can make it faster.

Patch

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 39bf930..f13bc8e 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -338,7 +338,7 @@  struct ingress_policer {
 struct netdev_dpdk {
     struct netdev up;
     int port_id;
-    int max_packet_len;
+    atomic_int max_packet_len;
     enum dpdk_dev_type type;
 
     struct dpdk_tx_queue *tx_q;
@@ -586,7 +586,8 @@  netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
         dev->dpdk_mp = mp;
         dev->mtu = dev->requested_mtu;
         dev->socket_id = dev->requested_socket_id;
-        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
+        atomic_store_relaxed(&dev->max_packet_len,
+            MTU_TO_FRAME_LEN(dev->mtu));
     }
 
     return 0;
@@ -647,7 +648,8 @@  dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
 
     if (dev->mtu > ETHER_MTU) {
         conf.rxmode.jumbo_frame = 1;
-        conf.rxmode.max_rx_pkt_len = dev->max_packet_len;
+        atomic_read_relaxed(&dev->max_packet_len,
+            &conf.rxmode.max_rx_pkt_len);
     } else {
         conf.rxmode.jumbo_frame = 0;
         conf.rxmode.max_rx_pkt_len = 0;
@@ -840,7 +842,7 @@  netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
     dev->type = type;
     dev->flags = 0;
     dev->requested_mtu = dev->mtu = ETHER_MTU;
-    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
+    atomic_init(&dev->max_packet_len, MTU_TO_FRAME_LEN(dev->mtu));
     ovsrcu_index_init(&dev->vid, -1);
     dev->vhost_reconfigured = false;
 
@@ -1511,12 +1513,15 @@  netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
     int i = 0;
     int cnt = 0;
     struct rte_mbuf *pkt;
+    int max_packet_len;
+
+    atomic_read_relaxed(&dev->max_packet_len, &max_packet_len);
 
     for (i = 0; i < pkt_cnt; i++) {
         pkt = pkts[i];
-        if (OVS_UNLIKELY(pkt->pkt_len > dev->max_packet_len)) {
+        if (OVS_UNLIKELY(pkt->pkt_len > max_packet_len)) {
             VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " max_packet_len %d",
-                         dev->up.name, pkt->pkt_len, dev->max_packet_len);
+                         dev->up.name, pkt->pkt_len, max_packet_len);
             rte_pktmbuf_free(pkt);
             continue;
         }
@@ -1620,6 +1625,7 @@  dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
     int dropped = 0;
     int newcnt = 0;
     int i;
+    int max_packet_len;
 
     /* If we are on a non pmd thread we have to use the mempool mutex, because
      * every non pmd thread shares the same mempool cache */
@@ -1630,12 +1636,14 @@  dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
 
     dp_packet_batch_apply_cutlen(batch);
 
+    atomic_read_relaxed(&dev->max_packet_len, &max_packet_len);
+
     for (i = 0; i < batch->count; i++) {
         int size = dp_packet_size(batch->packets[i]);
 
-        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
+        if (OVS_UNLIKELY(size > max_packet_len)) {
             VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
-                         (int) size, dev->max_packet_len);
+                         (int) size, max_packet_len);
 
             dropped++;
             continue;
@@ -1803,7 +1811,15 @@  netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
     ovs_mutex_lock(&dev->mutex);
     if (dev->requested_mtu != mtu) {
         dev->requested_mtu = mtu;
-        netdev_request_reconfigure(netdev);
+
+        if (dev->type == DPDK_DEV_ETH
+            || dpdk_buf_size(dev->mtu) != dpdk_buf_size(mtu)) {
+            netdev_request_reconfigure(netdev);
+        } else {
+            dev->mtu = mtu;
+            atomic_store_relaxed(&dev->max_packet_len,
+                MTU_TO_FRAME_LEN(dev->mtu));
+        }
+        }
     }
     ovs_mutex_unlock(&dev->mutex);
 
@@ -2234,6 +2250,7 @@  netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
 {
     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
     struct rte_eth_dev_info dev_info;
+    int max_packet_len;
 
     if (!rte_eth_dev_is_valid_port(dev->port_id)) {
         return ENODEV;
@@ -2248,7 +2265,8 @@  netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
                            rte_eth_dev_socket_id(dev->port_id));
     smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
     smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
-    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
+    atomic_read_relaxed(&dev->max_packet_len, &max_packet_len);
+    smap_add_format(args, "max_rx_pktlen", "%u", max_packet_len);
     smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
     smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
     smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);