
[ovs-dev] [PATCH RFC 16/17] Add Generic Segmentation Offloading.

Message ID: 20211207165156.705727-17-fbl@sysclose.org
State: RFC
Series: [ovs-dev] [PATCH RFC 16/17] Add Generic Segmentation Offloading.

Commit Message

Flavio Leitner Dec. 7, 2021, 4:51 p.m. UTC
This provides a software segmentation fallback for the case where
the egress netdev doesn't support TCP segmentation in hardware.

This is an _untested_ patch to showcase the proposed solution.

The challenge here is to guarantee packet ordering in the
original batch, which may be full of TSO packets. Each TSO
packet can carry up to ~64 kB of payload, so with a segment
size of 1440 bytes that means about 44 segments for each TSO
packet. A batch holds up to 32 packets, so in the worst case
a batch expands to 1408 normal packets.
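
Spelled out with those numbers, the worst case the code has to
size for is:

    ~64 kB / 1440 B per segment  ->   ~44 segments per TSO packet
    32 packets * 44 segments     ->  1408 segments total
    DIV_ROUND_UP(1408, 32)       ->    44 output batches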

The segmentation code first estimates the total number of
resulting packets, and from that the number of batches needed,
then allocates enough memory, and finally does the work.

Each batch is then sent, in order, to the netdev.
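
In condensed form (names as in the patch), netdev_send_tso()
does roughly the following:

    n_packets = 0;
    DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
        n_packets += dp_packet_ol_tcp_seg(packet)
                     ? dp_packet_gso_nr_segs(packet) : 1;
    }
    n_batches = DIV_ROUND_UP(n_packets, NETDEV_MAX_BURST);
    batches = xmalloc(n_batches * sizeof *batches);
    /* Segment or move every packet into 'batches', preserving
     * order, then send each batch via netdev_class->send(). */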

Signed-off-by: Flavio Leitner <fbl@sysclose.org>
---
 lib/automake.mk     |   2 +
 lib/dp-packet-gso.c | 153 ++++++++++++++++++++++++++++++++++++++++++++
 lib/dp-packet-gso.h |  24 +++++++
 lib/dp-packet.h     |   7 ++
 lib/netdev.c        | 122 +++++++++++++++++++++--------------
 5 files changed, 259 insertions(+), 49 deletions(-)
 create mode 100644 lib/dp-packet-gso.c
 create mode 100644 lib/dp-packet-gso.h

Patch

diff --git a/lib/automake.mk b/lib/automake.mk
index 46f869a33..2ca94e13c 100644
--- a/lib/automake.mk
+++ b/lib/automake.mk
@@ -107,6 +107,8 @@  lib_libopenvswitch_la_SOURCES = \
 	lib/dpctl.h \
 	lib/dp-packet.h \
 	lib/dp-packet.c \
+	lib/dp-packet-gso.c \
+	lib/dp-packet-gso.h \
 	lib/dpdk.h \
 	lib/dpif-netdev-extract-study.c \
 	lib/dpif-netdev-lookup.h \
diff --git a/lib/dp-packet-gso.c b/lib/dp-packet-gso.c
new file mode 100644
index 000000000..fcc35b100
--- /dev/null
+++ b/lib/dp-packet-gso.c
@@ -0,0 +1,153 @@ 
+/*
+ * Copyright (c) 2021 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "coverage.h"
+#include "dp-packet.h"
+#include "dp-packet-gso.h"
+#include "netdev-provider.h"
+
+COVERAGE_DEFINE(soft_seg_good);
+
+/* Returns a new packet that is a segment of packet 'p'.
+ *
+ * The new packet is initialized with 'hdr_len' bytes from the
+ * start of packet 'p' and then appended with 'data_len' bytes
+ * from the 'data' buffer.
+ *
+ * Note: The packet headers are not updated. */
+static struct dp_packet *
+dp_packet_gso_seg_new(const struct dp_packet *p, size_t hdr_len,
+                      const char *data, size_t data_len)
+{
+    struct dp_packet *seg = dp_packet_new_with_headroom(hdr_len + data_len,
+                                                        dp_packet_headroom(p));
+
+    /* Append the original packet headers and then the payload. */
+    dp_packet_put(seg, dp_packet_data(p), hdr_len);
+    dp_packet_put(seg, data, data_len);
+
+    /* The new segment should have the same offsets. */
+    seg->l2_5_ofs = p->l2_5_ofs;
+    seg->l3_ofs = p->l3_ofs;
+    seg->l4_ofs = p->l4_ofs;
+
+    /* The protocol headers remain the same, so preserve hash and mark. */
+    *dp_packet_rss_ptr(seg) = dp_packet_get_rss_hash(p);
+    *dp_packet_flow_mark_ptr(seg) = *dp_packet_flow_mark_ptr(p);
+
+    /* The segment should inherit all the offloading flags from the
+     * original packet, except for the TCP segmentation flag. */
+    *dp_packet_ol_flags_ptr(seg) = *dp_packet_ol_flags_ptr(p);
+    dp_packet_ol_reset_tcp_seg(seg);
+
+    return seg;
+}
+
+/* Returns the calculated number of TCP segments in packet 'p'. */
+int
+dp_packet_gso_nr_segs(struct dp_packet *p)
+{
+    uint16_t segsz = dp_packet_get_tso_segsz(p);
+    const char *data_tail;
+    const char *data_pos;
+    int n_segs;
+
+    data_tail = (char *) dp_packet_tail(p) - dp_packet_l2_pad_size(p);
+    data_pos = dp_packet_get_tcp_payload(p);
+    n_segs = DIV_ROUND_UP((data_tail - data_pos), segsz);
+
+    return n_segs;
+}
+
+/* Performs software segmentation on packet 'p'.
+ *
+ * The resulting segments are appended to the array of preallocated
+ * batches in 'batches', starting at batch position '*batch_pos'. */
+void
+dp_packet_gso(struct dp_packet *p, struct dp_packet_batch *batches,
+              size_t *batch_pos)
+{
+    struct tcp_header *tcp_hdr;
+    struct ip_header *ip_hdr;
+    struct dp_packet *seg;
+    uint32_t tcp_seq;
+    uint16_t ip_id;
+    int hdr_len;
+
+    tcp_hdr = dp_packet_l4(p);
+    tcp_seq = ntohl(get_16aligned_be32(&tcp_hdr->tcp_seq));
+    hdr_len = ((char *)dp_packet_l4(p) - (char *)dp_packet_eth(p))
+              + TCP_OFFSET(tcp_hdr->tcp_ctl) * 4;
+    ip_id = 0;
+    if (dp_packet_ol_tx_ipv4(p)) {
+        ip_hdr = dp_packet_l3(p);
+        ip_id = ntohs(ip_hdr->ip_id);
+    }
+
+    uint16_t tso_segsz = dp_packet_get_tso_segsz(p);
+    const char *data_tail = (char *) dp_packet_tail(p)
+                            - dp_packet_l2_pad_size(p);
+    const char *data_pos = dp_packet_get_tcp_payload(p);
+    int n_segs = dp_packet_gso_nr_segs(p);
+    int seg_len;
+    for (int i = 0; i < n_segs; i++) {
+        seg_len = data_tail - data_pos;
+        if (seg_len > tso_segsz) {
+            seg_len = tso_segsz;
+        }
+
+        seg = dp_packet_gso_seg_new(p, hdr_len, data_pos, seg_len);
+        data_pos += seg_len;
+
+        /* Update L3 header. */
+        if (dp_packet_ol_tx_ipv4(seg)) {
+            ip_hdr = dp_packet_l3(seg);
+            /* ip_tot_len covers the IP and TCP headers plus payload. */
+            ip_hdr->ip_tot_len = htons(sizeof *ip_hdr
+                                       + dp_packet_l4_size(seg));
+            ip_hdr->ip_id = htons(ip_id);
+            ip_hdr->ip_csum = 0;
+            ip_id++;
+        } else {
+            struct ovs_16aligned_ip6_hdr *ip6_hdr = dp_packet_l3(seg);
+
+            /* plen covers the TCP header plus payload. */
+            ip6_hdr->ip6_ctlun.ip6_un1.ip6_un1_plen =
+                htons(dp_packet_l4_size(seg));
+        }
+
+        /* Update L4 header. */
+        tcp_hdr = dp_packet_l4(seg);
+        put_16aligned_be32(&tcp_hdr->tcp_seq, htonl(tcp_seq));
+        /* Advance by this segment's payload length. */
+        tcp_seq += dp_packet_l4_size(seg) - TCP_OFFSET(tcp_hdr->tcp_ctl) * 4;
+        if (OVS_LIKELY(i < (n_segs - 1))) {
+            /* Reset flags PSH and FIN unless it is the last segment,
+             * preserving the original TCP data offset. */
+            uint16_t tcp_offset = TCP_OFFSET(tcp_hdr->tcp_ctl);
+            uint16_t tcp_flags = TCP_FLAGS(tcp_hdr->tcp_ctl)
+                                 & ~(TCP_PSH | TCP_FIN);
+            tcp_hdr->tcp_ctl = TCP_CTL(tcp_flags, tcp_offset);
+        }
+
+        if (dp_packet_batch_is_full(&batches[*batch_pos])) {
+            *batch_pos += 1;
+        }
+
+        dp_packet_batch_add(&batches[*batch_pos], seg);
+    }
+
+    COVERAGE_INC(soft_seg_good);
+}
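
The intended calling pattern for the two exported functions above
mirrors netdev_send_tso() further down; as a sketch for review
(not part of the patch):

    size_t pos = 0;
    int n_segs = dp_packet_gso_nr_segs(packet);
    int n_batches = DIV_ROUND_UP(n_segs, NETDEV_MAX_BURST);
    struct dp_packet_batch *batches = xmalloc(n_batches
                                              * sizeof *batches);

    for (int i = 0; i < n_batches; i++) {
        dp_packet_batch_init(&batches[i]);
    }

    dp_packet_gso(packet, batches, &pos);  /* Fills 'batches' in order. */
    dp_packet_delete(packet);              /* Caller frees the original. */
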
diff --git a/lib/dp-packet-gso.h b/lib/dp-packet-gso.h
new file mode 100644
index 000000000..81cb52742
--- /dev/null
+++ b/lib/dp-packet-gso.h
@@ -0,0 +1,24 @@ 
+/*
+ * Copyright (c) 2021 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DP_PACKET_GSO_H
+#define DP_PACKET_GSO_H 1
+
+void dp_packet_gso(struct dp_packet *p, struct dp_packet_batch *batches,
+                   size_t *batch_pos);
+int dp_packet_gso_nr_segs(struct dp_packet *p);
+
+#endif /* dp-packet-gso.h */
diff --git a/lib/dp-packet.h b/lib/dp-packet.h
index 27529ca87..e45ada1c7 100644
--- a/lib/dp-packet.h
+++ b/lib/dp-packet.h
@@ -1104,6 +1104,13 @@  dp_packet_ol_set_tcp_seg(struct dp_packet *p)
     *dp_packet_ol_flags_ptr(p) |= DP_PACKET_OL_TX_TCP_SEG;
 }
 
+/* Resets TCP Segmentation flag in packet 'p'. */
+static inline void
+dp_packet_ol_reset_tcp_seg(struct dp_packet *p)
+{
+    *dp_packet_ol_flags_ptr(p) &= ~DP_PACKET_OL_TX_TCP_SEG;
+}
+
 /* Returns 'true' if the IP header has good integrity and the
  * checksum in it is complete. */
 static inline bool
diff --git a/lib/netdev.c b/lib/netdev.c
index fffd72f5f..2eeed9a40 100644
--- a/lib/netdev.c
+++ b/lib/netdev.c
@@ -35,6 +35,7 @@ 
 #include "coverage.h"
 #include "dpif.h"
 #include "dp-packet.h"
+#include "dp-packet-gso.h"
 #include "openvswitch/dynamic-string.h"
 #include "fatal-signal.h"
 #include "hash.h"
@@ -55,6 +56,7 @@ 
 #include "svec.h"
 #include "openvswitch/vlog.h"
 #include "flow.h"
+#include "userspace-tso.h"
 #include "util.h"
 #ifdef __linux__
 #include "tc.h"
@@ -66,6 +68,7 @@  COVERAGE_DEFINE(netdev_received);
 COVERAGE_DEFINE(netdev_sent);
 COVERAGE_DEFINE(netdev_add_router);
 COVERAGE_DEFINE(netdev_get_stats);
+COVERAGE_DEFINE(netdev_send_tcp_seg_drops);
 COVERAGE_DEFINE(netdev_send_prepare_drops);
 COVERAGE_DEFINE(netdev_push_header_drops);
 
@@ -785,59 +788,70 @@  netdev_get_pt_mode(const struct netdev *netdev)
             : NETDEV_PT_LEGACY_L2);
 }
 
-/* Check if a 'packet' is compatible with 'netdev_flags'.
- * If a packet is incompatible, return 'false' with the 'errormsg'
- * pointing to a reason. */
-static bool
-netdev_send_prepare_packet(const uint64_t netdev_flags,
-                           struct dp_packet *packet, char **errormsg)
-{
-    if (dp_packet_ol_tcp_seg(packet)
-        && !(netdev_flags & NETDEV_OFFLOAD_TX_TCP_TSO)) {
-            /* Fall back to GSO in software. */
-            VLOG_ERR_BUF(errormsg, "No TSO support");
-            return false;
-    }
-
-    /* Packet with IP csum offloading enabled was received with verified csum.
-     * Leave the IP csum offloading enabled even with good checksum to the
-     * netdev to decide what would be the best to do.
-     * Provide a software fallback in case the device doesn't support IP csum
-     * offloading. Note: Encapsulated packet must have the inner IP header
-     * csum already calculated.
-     * Packet with L4 csum offloading enabled was received with verified csum.
-     * Leave the L4 csum offloading enabled even with good checksum for the
-     * netdev to decide what would be the best to do.
-     * Netdev that requires pseudo header csum needs to calculate that.
-     * Provide a software fallback in case the netdev doesn't support L4 csum
-     * offloading. Note: Encapsulated packet must have the inner L4 header
-     * csum already calculated. */
-    dp_packet_ol_send_prepare(packet, netdev_flags);
-    return true;
-}
-
-/* Check if each packet in 'batch' is compatible with 'netdev' features,
- * otherwise either fall back to software implementation or drop it. */
-static void
-netdev_send_prepare_batch(const struct netdev *netdev,
-                          struct dp_packet_batch *batch)
+static int
+netdev_send_tso(struct netdev *netdev, int qid,
+                struct dp_packet_batch *batch, bool concurrent_txq)
 {
+    struct dp_packet_batch *batches;
     struct dp_packet *packet;
-    size_t i, size = dp_packet_batch_size(batch);
-
-    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
-        char *errormsg = NULL;
+    int n_packets;
+    int n_batches;
+    int error;
 
-        if (netdev_send_prepare_packet(netdev->ol_flags, packet, &errormsg)) {
-            dp_packet_batch_refill(batch, packet, i);
+    /* Calculate the total number of packets in the batch after
+     * the segmentation. */
+    n_packets = 0;
+    DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
+        if (dp_packet_ol_tcp_seg(packet)) {
+            n_packets += dp_packet_gso_nr_segs(packet);
         } else {
+            n_packets++;
+        }
+    }
+
+    if (!n_packets) {
+        return 0;
+    }
+
+    /* Allocate enough batches to store all the packets in order. */
+    n_batches = DIV_ROUND_UP(n_packets, NETDEV_MAX_BURST);
+    batches = xmalloc(n_batches * sizeof(struct dp_packet_batch));
+    size_t batch_pos = 0;
+    for (batch_pos = 0; batch_pos < n_batches; batch_pos++) {
+        dp_packet_batch_init(&batches[batch_pos]);
+    }
+
+    /* Do the packet segmentation if TSO is flagged. */
+    size_t size = dp_packet_batch_size(batch);
+    size_t k;
+    batch_pos = 0;
+    DP_PACKET_BATCH_REFILL_FOR_EACH (k, size, packet, batch) {
+        if (dp_packet_ol_tcp_seg(packet)) {
+            dp_packet_gso(packet, batches, &batch_pos);
             dp_packet_delete(packet);
-            COVERAGE_INC(netdev_send_prepare_drops);
-            VLOG_WARN_RL(&rl, "%s: Packet dropped: %s",
-                         netdev_get_name(netdev), errormsg);
-            free(errormsg);
+        } else {
+            if (dp_packet_batch_is_full(&batches[batch_pos])) {
+                batch_pos++;
+            }
+
+            dp_packet_batch_add(&batches[batch_pos], packet);
+        }
+    }
+
+    for (batch_pos = 0; batch_pos < n_batches; batch_pos++) {
+        DP_PACKET_BATCH_FOR_EACH (i, packet, (&batches[batch_pos])) {
+            dp_packet_ol_send_prepare(packet, netdev->ol_flags);
+        }
+
+        error = netdev->netdev_class->send(netdev, qid, &batches[batch_pos],
+                                           concurrent_txq);
+        if (!error) {
+            COVERAGE_INC(netdev_sent);
         }
     }
+
+    free(batches);
+    return error;
 }
 
 /* Sends 'batch' on 'netdev'.  Returns 0 if successful (for every packet),
@@ -869,11 +883,21 @@  int
 netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
             bool concurrent_txq)
 {
+    const uint64_t netdev_flags = netdev->ol_flags;
+    struct dp_packet *packet;
     int error;
 
-    netdev_send_prepare_batch(netdev, batch);
-    if (OVS_UNLIKELY(dp_packet_batch_is_empty(batch))) {
-        return 0;
+    if (userspace_tso_enabled() &&
+        !(netdev_flags & NETDEV_OFFLOAD_TX_TCP_TSO)) {
+        DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
+            if (dp_packet_ol_tcp_seg(packet)) {
+                return netdev_send_tso(netdev, qid, batch, concurrent_txq);
+            }
+        }
+    }
+
+    DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
+        dp_packet_ol_send_prepare(packet, netdev_flags);
     }
 
     error = netdev->netdev_class->send(netdev, qid, batch, concurrent_txq);