@@ -27,8 +27,6 @@
Userspace Datapath - TSO
========================
-**Note:** This feature is considered experimental.
-
TCP Segmentation Offload (TSO) enables a network stack to delegate segmentation
of an oversized TCP segment to the underlying physical NIC. Offload of frame
segmentation achieves computational savings in the core, freeing up CPU cycles
@@ -51,16 +49,6 @@ __ https://doc.dpdk.org/guides-20.11/nics/overview.html
Enabling TSO
~~~~~~~~~~~~
-The TSO support may be enabled via a global config value
-``userspace-tso-enable``. Setting this to ``true`` enables TSO support for
-all ports.::
-
- $ ovs-vsctl set Open_vSwitch . other_config:userspace-tso-enable=true
-
-The default value is ``false``.
-
-Changing ``userspace-tso-enable`` requires restarting the daemon.
-
When using :doc:`vHost User ports <dpdk/vhost-user>`, TSO may be enabled
as follows.
@@ -363,8 +363,6 @@ lib_libopenvswitch_la_SOURCES = \
lib/unicode.h \
lib/unixctl.c \
lib/unixctl.h \
- lib/userspace-tso.c \
- lib/userspace-tso.h \
lib/util.c \
lib/util.h \
lib/uuid.c \
@@ -65,7 +65,6 @@
#include "timeval.h"
#include "unaligned.h"
#include "unixctl.h"
-#include "userspace-tso.h"
#include "util.h"
#include "uuid.h"
@@ -1180,16 +1179,13 @@ dpdk_eth_dev_init(struct netdev_dpdk *dev)
dev->hw_ol_features &= ~NETDEV_TX_SCTP_CKSUM_OFFLOAD;
}
- dev->hw_ol_features &= ~NETDEV_TX_TSO_OFFLOAD;
- if (userspace_tso_enabled()) {
- if (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
- dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
- } else {
- VLOG_WARN("%s: Tx TSO offload is not supported.",
- netdev_get_name(&dev->up));
- }
+ if (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
+ dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
+ } else {
+ dev->hw_ol_features &= ~NETDEV_TX_TSO_OFFLOAD;
}
+
n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
@@ -1419,16 +1415,13 @@ netdev_dpdk_vhost_construct(struct netdev *netdev)
goto out;
}
- if (!userspace_tso_enabled()) {
- err = rte_vhost_driver_disable_features(dev->vhost_id,
- 1ULL << VIRTIO_NET_F_HOST_TSO4
- | 1ULL << VIRTIO_NET_F_HOST_TSO6
- | 1ULL << VIRTIO_NET_F_CSUM);
- if (err) {
- VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user "
- "port: %s\n", name);
- goto out;
- }
+ uint64_t vhost_unsup_flags = 1ULL << VIRTIO_NET_F_HOST_ECN
+ | 1ULL << VIRTIO_NET_F_HOST_UFO;
+ err = rte_vhost_driver_disable_features(dev->vhost_id, vhost_unsup_flags);
+ if (err) {
+ VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user "
+ "port: %s\n", name);
+ goto out;
}
err = rte_vhost_driver_start(dev->vhost_id);
@@ -5118,11 +5111,6 @@ dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
dev->tx_q[0].map = 0;
}
- if (userspace_tso_enabled()) {
- dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
- VLOG_DBG("%s: TSO enabled on vhost port", netdev_get_name(&dev->up));
- }
-
netdev_dpdk_remap_txqs(dev);
err = netdev_dpdk_mempool_configure(dev);
@@ -5189,9 +5177,7 @@ netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
}
/* Enable External Buffers if TCP Segmentation Offload is enabled. */
- if (userspace_tso_enabled()) {
- vhost_flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
- }
+ vhost_flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
err = rte_vhost_driver_register(dev->vhost_id, vhost_flags);
if (err) {
@@ -5214,22 +5200,12 @@ netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
goto unlock;
}
- vhost_unsup_flags = 1ULL << VIRTIO_NET_F_HOST_ECN
- | 1ULL << VIRTIO_NET_F_HOST_UFO;
-
dev->hw_ol_features |= NETDEV_TX_IPV4_CKSUM_OFFLOAD;
dev->hw_ol_features |= NETDEV_TX_TCP_CKSUM_OFFLOAD;
dev->hw_ol_features |= NETDEV_TX_UDP_CKSUM_OFFLOAD;
- dev->hw_ol_features |= NETDEV_TX_SCTP_CKSUM_OFFLOAD;
- if (userspace_tso_enabled()) {
- dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
- VLOG_DBG("%s: TSO enabled on vhost port",
- netdev_get_name(&dev->up));
- } else {
- vhost_unsup_flags = 1ULL << VIRTIO_NET_F_HOST_TSO4
- | 1ULL << VIRTIO_NET_F_HOST_TSO6;
- }
-
+ dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
+ vhost_unsup_flags = 1ULL << VIRTIO_NET_F_HOST_ECN
+ | 1ULL << VIRTIO_NET_F_HOST_UFO;
err = rte_vhost_driver_disable_features(dev->vhost_id,
vhost_unsup_flags);
if (err) {
@@ -78,7 +78,6 @@
#include "timer.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"
-#include "userspace-tso.h"
#include "util.h"
VLOG_DEFINE_THIS_MODULE(netdev_linux);
@@ -942,12 +941,11 @@ netdev_linux_construct(struct netdev *netdev_)
/* The socket interface doesn't offer the option to enable only
* csum offloading without TSO. */
- if (userspace_tso_enabled()) {
- netdev_->ol_flags |= NETDEV_OFFLOAD_TX_TCP_TSO;
- netdev_->ol_flags |= NETDEV_OFFLOAD_TX_TCP_CSUM;
- netdev_->ol_flags |= NETDEV_OFFLOAD_TX_UDP_CSUM;
- netdev_->ol_flags |= NETDEV_OFFLOAD_TX_IPV4_CSUM;
- }
+ netdev_->ol_flags |= NETDEV_OFFLOAD_TX_TCP_TSO;
+ netdev_->ol_flags |= NETDEV_OFFLOAD_TX_TCP_CSUM;
+ netdev_->ol_flags |= NETDEV_OFFLOAD_TX_UDP_CSUM;
+ netdev_->ol_flags |= NETDEV_OFFLOAD_TX_SCTP_CSUM;
+ netdev_->ol_flags |= NETDEV_OFFLOAD_TX_IPV4_CSUM;
error = get_flags(&netdev->up, &netdev->ifi_flags);
if (error == ENODEV) {
@@ -1017,19 +1015,12 @@ netdev_linux_construct_tap(struct netdev *netdev_)
goto error_close;
}
- oflags = TUN_F_CSUM;
- if (userspace_tso_enabled()) {
- oflags |= (TUN_F_TSO4 | TUN_F_TSO6);
- }
-
+ oflags = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
if (ioctl(netdev->tap_fd, TUNSETOFFLOAD, oflags) == 0) {
netdev_->ol_flags |= (NETDEV_OFFLOAD_TX_IPV4_CSUM
| NETDEV_OFFLOAD_TX_TCP_CSUM
- | NETDEV_OFFLOAD_TX_UDP_CSUM);
-
- if (userspace_tso_enabled()) {
- netdev_->ol_flags |= NETDEV_OFFLOAD_TX_TCP_TSO;
- }
+ | NETDEV_OFFLOAD_TX_UDP_CSUM
+ | NETDEV_OFFLOAD_TX_TCP_TSO);
} else {
VLOG_WARN("%s: Disabling hardware offloading: %s", name,
ovs_strerror(errno));
@@ -1122,9 +1113,8 @@ netdev_linux_rxq_construct(struct netdev_rxq *rxq_)
goto error;
}
- if (userspace_tso_enabled()
- && setsockopt(rx->fd, SOL_PACKET, PACKET_VNET_HDR, &val,
- sizeof val)) {
+ if (setsockopt(rx->fd, SOL_PACKET, PACKET_VNET_HDR, &val,
+ sizeof val)) {
error = errno;
VLOG_ERR("%s: failed to enable vnet hdr in txq raw socket: %s",
netdev_get_name(netdev_), ovs_strerror(errno));
@@ -1229,12 +1219,12 @@ static int
netdev_linux_batch_rxq_recv_sock(struct netdev_rxq_linux *rx, int mtu,
struct dp_packet_batch *batch)
{
- int iovlen;
size_t std_len;
ssize_t retval;
- int virtio_net_hdr_size;
- struct iovec iovs[NETDEV_MAX_BURST][IOV_TSO_SIZE];
struct cmsghdr *cmsg;
+ int iovlen = IOV_TSO_SIZE;
+ struct iovec iovs[NETDEV_MAX_BURST][IOV_TSO_SIZE];
+ int virtio_net_hdr_size = sizeof(struct virtio_net_hdr);
union {
struct cmsghdr cmsg;
char buffer[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
@@ -1243,17 +1233,6 @@ netdev_linux_batch_rxq_recv_sock(struct netdev_rxq_linux *rx, int mtu,
struct dp_packet *buffers[NETDEV_MAX_BURST];
int i;
- if (userspace_tso_enabled()) {
- /* Use the buffer from the allocated packet below to receive MTU
- * sized packets and an aux_buf for extra TSO data. */
- iovlen = IOV_TSO_SIZE;
- virtio_net_hdr_size = sizeof(struct virtio_net_hdr);
- } else {
- /* Use only the buffer from the allocated packet. */
- iovlen = IOV_STD_SIZE;
- virtio_net_hdr_size = 0;
- }
-
/* The length here needs to be accounted in the same way when the
* aux_buf is allocated so that it can be prepended to TSO buffer. */
std_len = virtio_net_hdr_size + VLAN_ETH_HEADER_LEN + mtu;
@@ -1261,10 +1240,8 @@ netdev_linux_batch_rxq_recv_sock(struct netdev_rxq_linux *rx, int mtu,
buffers[i] = dp_packet_new_with_headroom(std_len, DP_NETDEV_HEADROOM);
iovs[i][IOV_PACKET].iov_base = dp_packet_data(buffers[i]);
iovs[i][IOV_PACKET].iov_len = std_len;
- if (iovlen == IOV_TSO_SIZE) {
- iovs[i][IOV_AUXBUF].iov_base = dp_packet_data(rx->aux_bufs[i]);
- iovs[i][IOV_AUXBUF].iov_len = dp_packet_tailroom(rx->aux_bufs[i]);
- }
+ iovs[i][IOV_AUXBUF].iov_base = dp_packet_data(rx->aux_bufs[i]);
+ iovs[i][IOV_AUXBUF].iov_len = dp_packet_tailroom(rx->aux_bufs[i]);
mmsgs[i].msg_hdr.msg_name = NULL;
mmsgs[i].msg_hdr.msg_namelen = 0;
@@ -1332,21 +1309,19 @@ netdev_linux_batch_rxq_recv_sock(struct netdev_rxq_linux *rx, int mtu,
pkt = buffers[i];
}
- if (virtio_net_hdr_size) {
- int ret = netdev_linux_parse_vnet_hdr(pkt);
- if (OVS_UNLIKELY(ret)) {
- struct netdev *netdev_ = netdev_rxq_get_netdev(&rx->up);
- struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ int ret = netdev_linux_parse_vnet_hdr(pkt);
+ if (OVS_UNLIKELY(ret)) {
+ struct netdev *netdev_ = netdev_rxq_get_netdev(&rx->up);
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
- /* Unexpected error situation: the virtio header is not present
- * or corrupted or contains unsupported features. Drop the packet
- * but continue in case next ones are correct. */
- dp_packet_delete(pkt);
- netdev->rx_dropped += 1;
- VLOG_WARN_RL(&rl, "%s: Dropped packet: %s",
- netdev_get_name(netdev_), ovs_strerror(ret));
- continue;
- }
+ /* Unexpected error situation: the virtio header is not present
+ * or corrupted or contains unsupported features. Drop the packet
+ * but continue in case next ones are correct. */
+ dp_packet_delete(pkt);
+ netdev->rx_dropped += 1;
+ VLOG_WARN_RL(&rl, "%s: Dropped packet: %s",
+ netdev_get_name(netdev_), ovs_strerror(ret));
+ continue;
}
for (cmsg = CMSG_FIRSTHDR(&mmsgs[i].msg_hdr); cmsg;
@@ -1394,20 +1369,11 @@ static int
netdev_linux_batch_rxq_recv_tap(struct netdev_rxq_linux *rx, int mtu,
struct dp_packet_batch *batch)
{
+ int iovlen = IOV_TSO_SIZE;
ssize_t retval;
size_t std_len;
- int iovlen;
int i;
- if (userspace_tso_enabled()) {
- /* Use the buffer from the allocated packet below to receive MTU
- * sized packets and an aux_buf for extra TSO data. */
- iovlen = IOV_TSO_SIZE;
- } else {
- /* Use only the buffer from the allocated packet. */
- iovlen = IOV_STD_SIZE;
- }
-
/* The length here needs to be accounted in the same way when the
* aux_buf is allocated so that it can be prepended to TSO buffer. */
std_len = sizeof(struct virtio_net_hdr) + VLAN_ETH_HEADER_LEN + mtu;
@@ -1420,10 +1386,8 @@ netdev_linux_batch_rxq_recv_tap(struct netdev_rxq_linux *rx, int mtu,
buffer = dp_packet_new_with_headroom(std_len, DP_NETDEV_HEADROOM);
iov[IOV_PACKET].iov_base = dp_packet_data(buffer);
iov[IOV_PACKET].iov_len = std_len;
- if (iovlen == IOV_TSO_SIZE) {
- iov[IOV_AUXBUF].iov_base = dp_packet_data(rx->aux_bufs[i]);
- iov[IOV_AUXBUF].iov_len = dp_packet_tailroom(rx->aux_bufs[i]);
- }
+ iov[IOV_AUXBUF].iov_base = dp_packet_data(rx->aux_bufs[i]);
+ iov[IOV_AUXBUF].iov_len = dp_packet_tailroom(rx->aux_bufs[i]);
do {
retval = readv(rx->fd, iov, iovlen);
@@ -1487,21 +1451,19 @@ netdev_linux_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet_batch *batch,
mtu = ETH_PAYLOAD_MAX;
}
- if (userspace_tso_enabled()) {
- /* Allocate TSO packets. The packet has enough headroom to store
- * a full non-TSO packet. When a TSO packet is received, the data
- * from non-TSO buffer (std_len) is prepended to the TSO packet
- * (aux_buf). */
- size_t std_len = sizeof(struct virtio_net_hdr) + VLAN_ETH_HEADER_LEN
- + DP_NETDEV_HEADROOM + mtu;
- size_t data_len = LINUX_RXQ_TSO_MAX_LEN - std_len;
- for (int i = 0; i < NETDEV_MAX_BURST; i++) {
- if (rx->aux_bufs[i]) {
- continue;
- }
-
- rx->aux_bufs[i] = dp_packet_new_with_headroom(data_len, std_len);
+ /* Allocate TSO packets. The packet has enough headroom to store
+ * a full non-TSO packet. When a TSO packet is received, the data
+ * from non-TSO buffer (std_len) is prepended to the TSO packet
+ * (aux_buf). */
+ size_t std_len = sizeof(struct virtio_net_hdr) + VLAN_ETH_HEADER_LEN
+ + DP_NETDEV_HEADROOM + mtu;
+ size_t data_len = LINUX_RXQ_TSO_MAX_LEN - std_len;
+ for (int i = 0; i < NETDEV_MAX_BURST; i++) {
+ if (rx->aux_bufs[i]) {
+ continue;
}
+
+ rx->aux_bufs[i] = dp_packet_new_with_headroom(data_len, std_len);
}
dp_packet_batch_init(batch);
@@ -1550,7 +1512,7 @@ netdev_linux_rxq_drain(struct netdev_rxq *rxq_)
static int
netdev_linux_sock_batch_send(struct netdev *netdev_, int sock, int ifindex,
- bool tso, int mtu, struct dp_packet_batch *batch)
+ int mtu, struct dp_packet_batch *batch)
{
struct netdev_linux *netdev = netdev_linux_cast(netdev_);
const size_t size = dp_packet_batch_size(batch);
@@ -1563,17 +1525,13 @@ netdev_linux_sock_batch_send(struct netdev *netdev_, int sock, int ifindex,
struct iovec *iov = xmalloc(sizeof(*iov) * size);
struct dp_packet *packet;
int cnt = 0;
+ int ret;
DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
- if (tso) {
- int ret = netdev_linux_prepend_vnet_hdr(packet, mtu);
-
- if (OVS_UNLIKELY(ret)) {
- netdev->tx_dropped += 1;
- VLOG_WARN_RL(&rl, "%s: Packet dropped. %s",
- netdev_get_name(netdev_), ovs_strerror(ret));
- continue;
- }
+ ret = netdev_linux_prepend_vnet_hdr(packet, mtu);
+ if (OVS_UNLIKELY(ret)) {
+ netdev->tx_dropped += 1;
+ continue;
}
iov[cnt].iov_base = dp_packet_data(packet);
@@ -1738,15 +1696,11 @@ netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
struct dp_packet_batch *batch,
bool concurrent_txq OVS_UNUSED)
{
- bool tso = userspace_tso_enabled();
int mtu = ETH_PAYLOAD_MAX;
int error = 0;
int sock = 0;
- if (tso) {
- netdev_linux_get_mtu__(netdev_linux_cast(netdev_), &mtu);
- }
-
+ netdev_linux_get_mtu__(netdev_linux_cast(netdev_), &mtu);
if (!is_tap_netdev(netdev_)) {
if (netdev_linux_netnsid_is_remote(netdev_linux_cast(netdev_))) {
error = EOPNOTSUPP;
@@ -1765,7 +1719,7 @@ netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
goto free_batch;
}
- error = netdev_linux_sock_batch_send(netdev_, sock, ifindex, tso, mtu,
+ error = netdev_linux_sock_batch_send(netdev_, sock, ifindex, mtu,
batch);
} else {
error = netdev_linux_tap_batch_send(netdev_, mtu, batch);
@@ -6559,7 +6513,7 @@ af_packet_sock(void)
if (error) {
close(sock);
sock = -error;
- } else if (userspace_tso_enabled()) {
+ } else {
int val = 1;
error = setsockopt(sock, SOL_PACKET, PACKET_VNET_HDR, &val,
sizeof val);
@@ -6615,13 +6569,6 @@ netdev_linux_parse_vnet_hdr(struct dp_packet *b)
switch (vnet->gso_type) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_TCPV6:
- if (OVS_UNLIKELY(!userspace_tso_enabled())) {
- VLOG_WARN_RL(&rl, "Received an unsupported packet with TSO "
- "enabled.");
- ret = ENOTSUP;
- break;
- }
-
/* The packet has offloaded TCP segmentation. */
dp_packet_set_tso_segsz(b, vnet->gso_size);
dp_packet_ol_set_tcp_seg(b);
@@ -56,7 +56,6 @@
#include "svec.h"
#include "openvswitch/vlog.h"
#include "flow.h"
-#include "userspace-tso.h"
#include "util.h"
#ifdef __linux__
#include "tc.h"
@@ -887,8 +886,7 @@ netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
struct dp_packet *packet;
int error;
- if (userspace_tso_enabled() &&
- !(netdev_flags & NETDEV_OFFLOAD_TX_TCP_TSO)) {
+ if (!(netdev_flags & NETDEV_OFFLOAD_TX_TCP_TSO)) {
DP_PACKET_BATCH_FOR_EACH(i, packet, batch) {
if (dp_packet_ol_tcp_seg(packet)) {
return netdev_send_tso(netdev, qid, batch, concurrent_txq);
deleted file mode 100644
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2020 Red Hat, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <config.h>
-
-#include "smap.h"
-#include "ovs-thread.h"
-#include "openvswitch/vlog.h"
-#include "dpdk.h"
-#include "userspace-tso.h"
-#include "vswitch-idl.h"
-
-VLOG_DEFINE_THIS_MODULE(userspace_tso);
-
-static bool userspace_tso = false;
-
-void
-userspace_tso_init(const struct smap *ovs_other_config)
-{
- if (smap_get_bool(ovs_other_config, "userspace-tso-enable", false)) {
- static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
-
- if (ovsthread_once_start(&once)) {
- VLOG_INFO("Userspace TCP Segmentation Offloading support enabled");
- userspace_tso = true;
- ovsthread_once_done(&once);
- }
- }
-}
-
-bool
-userspace_tso_enabled(void)
-{
- return userspace_tso;
-}
deleted file mode 100644
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2020 Red Hat Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef USERSPACE_TSO_H
-#define USERSPACE_TSO_H 1
-
-void userspace_tso_init(const struct smap *ovs_other_config);
-bool userspace_tso_enabled(void);
-
-#endif /* userspace-tso.h */
@@ -65,7 +65,6 @@
#include "system-stats.h"
#include "timeval.h"
#include "tnl-ports.h"
-#include "userspace-tso.h"
#include "util.h"
#include "unixctl.h"
#include "lib/vswitch-idl.h"
@@ -3291,7 +3290,6 @@ bridge_run(void)
if (cfg) {
netdev_set_flow_api_enabled(&cfg->other_config);
dpdk_init(&cfg->other_config);
- userspace_tso_init(&cfg->other_config);
}
/* Initialize the ofproto library. This only needs to run once, but
@@ -742,26 +742,6 @@
The default value is <code>25%</code>.
</p>
</column>
- <column name="other_config" key="userspace-tso-enable"
- type='{"type": "boolean"}'>
- <p>
- Set this value to <code>true</code> to enable userspace support for
- TCP Segmentation Offloading (TSO). When it is enabled, the interfaces
- can provide an oversized TCP segment to the datapath and the datapath
- will offload the TCP segmentation and checksum calculation to the
- interfaces when necessary.
- </p>
- <p>
- The default value is <code>false</code>. Changing this value requires
- restarting the daemon.
- </p>
- <p>
- The feature only works if Open vSwitch is built with DPDK support.
- </p>
- <p>
- The feature is considered experimental.
- </p>
- </column>
</group>
<group title="Status">
<column name="next_cfg">
Now that there is segmentation in software as a fallback in case a netdev doesn't support TCP segmentation offloading (TSO), enable it by default on all possible netdevs. This patch showcases the idea, but it can't really be applied because it doesn't support encapsulated packets yet. Either it would have to enable that support first or provide a switch to turn the feature on/off globally depending on the use case. This patch is also useful for measuring performance with P2P and PVP and checking for regressions before continuing the work. The encapsulated traffic is challenging because DPDK ports require pointers to inner headers [1] and OVS doesn't support them at the moment. We could store the pointers when the packet is encapsulated, but then any further change in the packet headers may or may not cause the inner pointers to change too. Another requirement not present here is per-port control of the features (csum, TSO). That can be done, but, for example, if a vhost-user port has TSO turned off, then the software segmentation is used. Currently that allocates packets from normal memory, so DPDK would have to copy (dpdk_do_tx_copy) each packet to send out on another DPDK port. [1] https://doc.dpdk.org/guides/prog_guide/mbuf_lib.html#meta-information Signed-off-by: Flavio Leitner <fbl@sysclose.org> --- Documentation/topics/userspace-tso.rst | 12 -- lib/automake.mk | 2 - lib/netdev-dpdk.c | 56 +++------ lib/netdev-linux.c | 155 ++++++++----------------- lib/netdev.c | 4 +- lib/userspace-tso.c | 48 -------- lib/userspace-tso.h | 23 ---- vswitchd/bridge.c | 2 - vswitchd/vswitch.xml | 20 ---- 9 files changed, 68 insertions(+), 254 deletions(-) delete mode 100644 lib/userspace-tso.c delete mode 100644 lib/userspace-tso.h