@@ -76,7 +76,7 @@ dp_packet_gso_nr_segs(struct dp_packet *p)
const char *data_tail;
const char *data_pos;
- if (dp_packet_hwol_is_tunnel(p)) {
+ if (dp_packet_tunnel(p)) {
data_pos = dp_packet_get_inner_tcp_payload(p);
} else {
data_pos = dp_packet_get_tcp_payload(p);
@@ -108,9 +108,9 @@ dp_packet_gso(struct dp_packet *p, struct dp_packet_batch **batches)
bool outer_ipv4;
int hdr_len;
int seg_len;
- bool udp_tnl = dp_packet_hwol_is_tunnel_vxlan(p) ||
- dp_packet_hwol_is_tunnel_geneve(p);
- bool gre_tnl = dp_packet_hwol_is_tunnel_gre(p);
+ bool udp_tnl = dp_packet_tunnel_vxlan(p)
+ || dp_packet_tunnel_geneve(p);
+ bool gre_tnl = dp_packet_tunnel_gre(p);
tso_segsz = dp_packet_get_tso_segsz(p);
if (!tso_segsz) {
@@ -561,7 +561,7 @@ dp_packet_ol_send_prepare(struct dp_packet *p, uint64_t flags)
return;
}
- if (!dp_packet_hwol_is_tunnel(p)) {
+ if (!dp_packet_tunnel(p)) {
if (dp_packet_hwol_tx_ip_csum(p)) {
if (dp_packet_ip_checksum_good(p)) {
dp_packet_hwol_reset_tx_ip_csum(p);
@@ -599,8 +599,8 @@ dp_packet_ol_send_prepare(struct dp_packet *p, uint64_t flags)
return;
}
- if (dp_packet_hwol_is_tunnel_geneve(p) ||
- dp_packet_hwol_is_tunnel_vxlan(p)) {
+ if (dp_packet_tunnel_geneve(p)
+ || dp_packet_tunnel_vxlan(p)) {
/* If the TX interface doesn't support UDP tunnel offload but does
* support inner checksum offload and an outer UDP checksum is
@@ -68,21 +68,12 @@ enum {
DEF_OL_FLAG(DP_PACKET_OL_TX_SCTP_CKSUM, RTE_MBUF_F_TX_SCTP_CKSUM, 0x800),
/* Offload IP checksum. */
DEF_OL_FLAG(DP_PACKET_OL_TX_IP_CKSUM, RTE_MBUF_F_TX_IP_CKSUM, 0x1000),
- /* Offload packet is tunnel GENEVE. */
- DEF_OL_FLAG(DP_PACKET_OL_TX_TUNNEL_GENEVE,
- RTE_MBUF_F_TX_TUNNEL_GENEVE, 0x2000),
- /* Offload packet is tunnel VXLAN. */
- DEF_OL_FLAG(DP_PACKET_OL_TX_TUNNEL_VXLAN,
- RTE_MBUF_F_TX_TUNNEL_VXLAN, 0x4000),
/* Offload tunnel outer IPv4 checksum. */
DEF_OL_FLAG(DP_PACKET_OL_TX_OUTER_IP_CKSUM,
RTE_MBUF_F_TX_OUTER_IP_CKSUM, 0x10000),
/* Offload tunnel outer UDP checksum. */
DEF_OL_FLAG(DP_PACKET_OL_TX_OUTER_UDP_CKSUM,
RTE_MBUF_F_TX_OUTER_UDP_CKSUM, 0x20000),
- /* Offload packet is GRE tunnel. */
- DEF_OL_FLAG(DP_PACKET_OL_TX_TUNNEL_GRE,
- RTE_MBUF_F_TX_TUNNEL_GRE, 0x80000),
/* Adding new field requires adding to DP_PACKET_OL_SUPPORTED_MASK. */
};
@@ -92,9 +83,6 @@ enum {
DP_PACKET_OL_TX_UDP_CKSUM | \
DP_PACKET_OL_TX_SCTP_CKSUM | \
DP_PACKET_OL_TX_IP_CKSUM | \
- DP_PACKET_OL_TX_TUNNEL_GENEVE | \
- DP_PACKET_OL_TX_TUNNEL_VXLAN | \
- DP_PACKET_OL_TX_TUNNEL_GRE | \
DP_PACKET_OL_TX_OUTER_IP_CKSUM | \
DP_PACKET_OL_TX_OUTER_UDP_CKSUM)
@@ -117,6 +105,10 @@ enum OVS_PACKED_ENUM dp_packet_offload_mask {
DP_PACKET_OL_L4_CKSUM_BAD = UINT16_C(1) << 3,
/* Valid L4 checksum in the packet. */
DP_PACKET_OL_L4_CKSUM_GOOD = UINT16_C(1) << 8,
+
+    /* Bits for marking a packet as tunneled.  GRE sets both bits. */
+ DP_PACKET_OL_TUNNEL_GENEVE = UINT16_C(1) << 11,
+ DP_PACKET_OL_TUNNEL_VXLAN = UINT16_C(1) << 12,
};
#ifdef DPDK_NETDEV
@@ -131,6 +123,9 @@ BUILD_ASSERT_DECL(DP_PACKET_OL_L4_CKSUM_GOOD == RTE_MBUF_F_RX_L4_CKSUM_GOOD);
#define DP_PACKET_OL_L4_CKSUM_MASK (DP_PACKET_OL_L4_CKSUM_GOOD \
| DP_PACKET_OL_L4_CKSUM_BAD)
+#define DP_PACKET_OL_TUNNEL_MASK (DP_PACKET_OL_TUNNEL_GENEVE \
+ | DP_PACKET_OL_TUNNEL_VXLAN)
+
/* Buffer for holding packet data. A dp_packet is automatically reallocated
* as necessary if it grows too large for the available memory.
* By default the packet type is set to Ethernet (PT_ETH).
@@ -1076,6 +1071,60 @@ dp_packet_set_flow_mark(struct dp_packet *p, uint32_t mark)
p->has_mark = true;
}
+static inline bool OVS_WARN_UNUSED_RESULT
+dp_packet_tunnel_geneve(const struct dp_packet *b)
+{
+ return (b->offloads & DP_PACKET_OL_TUNNEL_MASK)
+ == DP_PACKET_OL_TUNNEL_GENEVE;
+}
+
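+/* Marks packet 'b' as a GENEVE tunnel packet. */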
+static inline void
+dp_packet_tunnel_set_geneve(struct dp_packet *b)
+{
+ b->offloads &= ~DP_PACKET_OL_TUNNEL_VXLAN;
+ b->offloads |= DP_PACKET_OL_TUNNEL_GENEVE;
+}
+
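+/* Returns 'true' if packet 'b' is marked as a VXLAN tunnel packet. */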
+static inline bool OVS_WARN_UNUSED_RESULT
+dp_packet_tunnel_vxlan(const struct dp_packet *b)
+{
+ return (b->offloads & DP_PACKET_OL_TUNNEL_MASK)
+ == DP_PACKET_OL_TUNNEL_VXLAN;
+}
+
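+/* Marks packet 'b' as a VXLAN tunnel packet. */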
+static inline void
+dp_packet_tunnel_set_vxlan(struct dp_packet *b)
+{
+ b->offloads &= ~DP_PACKET_OL_TUNNEL_GENEVE;
+ b->offloads |= DP_PACKET_OL_TUNNEL_VXLAN;
+}
+
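+/* Returns 'true' if packet 'b' is marked as a GRE tunnel packet. */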
+static inline bool OVS_WARN_UNUSED_RESULT
+dp_packet_tunnel_gre(const struct dp_packet *b)
+{
+ return (b->offloads & DP_PACKET_OL_TUNNEL_MASK)
+ == DP_PACKET_OL_TUNNEL_MASK;
+}
+
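+/* Marks packet 'b' as a GRE tunnel packet. */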
+static inline void
+dp_packet_tunnel_set_gre(struct dp_packet *b)
+{
+ b->offloads |= DP_PACKET_OL_TUNNEL_MASK;
+}
+
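+/* Returns 'true' if packet 'b' is marked as a tunnel packet of any type. */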
+static inline bool OVS_WARN_UNUSED_RESULT
+dp_packet_tunnel(const struct dp_packet *b)
+{
+ return !!(b->offloads & DP_PACKET_OL_TUNNEL_MASK);
+}
+
/* Returns the L4 cksum offload bitmask. */
static inline uint64_t
dp_packet_hwol_l4_mask(const struct dp_packet *b)
@@ -1121,38 +1163,6 @@ dp_packet_hwol_l4_is_sctp(struct dp_packet *b)
DP_PACKET_OL_TX_SCTP_CKSUM;
}
-/* Returns 'true' if packet 'b' is marked for tunnel GENEVE
- * checksum offloading. */
-static inline bool
-dp_packet_hwol_is_tunnel_geneve(struct dp_packet *b)
-{
- return !!(*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_TUNNEL_GENEVE);
-}
-
-/* Returns 'true' if packet 'b' is marked for tunnel VXLAN
- * checksum offloading. */
-static inline bool
-dp_packet_hwol_is_tunnel_vxlan(struct dp_packet *b)
-{
- return !!(*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_TUNNEL_VXLAN);
-}
-
-/* Returns 'true' if packet 'b' is marked for GRE tunnel offloading. */
-static inline bool
-dp_packet_hwol_is_tunnel_gre(struct dp_packet *b)
-{
- return !!(*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_TUNNEL_GRE);
-}
-
-/* Returns true if packet 'b' has any offloadable tunnel type. */
-static inline bool
-dp_packet_hwol_is_tunnel(const struct dp_packet *b)
-{
- return !!(*dp_packet_ol_flags_ptr(b) & (DP_PACKET_OL_TX_TUNNEL_VXLAN |
- DP_PACKET_OL_TX_TUNNEL_GRE |
- DP_PACKET_OL_TX_TUNNEL_GENEVE));
-}
-
/* Returns 'true' if packet 'b' is marked for outer IPv4 checksum offload. */
static inline bool
dp_packet_hwol_is_outer_ipv4_cksum(const struct dp_packet *b)
@@ -1233,27 +1243,6 @@ dp_packet_hwol_set_tcp_seg(struct dp_packet *b)
*dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TCP_SEG;
}
-/* Mark packet 'b' for tunnel GENEVE offloading. */
-static inline void
-dp_packet_hwol_set_tunnel_geneve(struct dp_packet *b)
-{
- *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TUNNEL_GENEVE;
-}
-
-/* Mark packet 'b' for tunnel VXLAN offloading. */
-static inline void
-dp_packet_hwol_set_tunnel_vxlan(struct dp_packet *b)
-{
- *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TUNNEL_VXLAN;
-}
-
-/* Mark packet 'b' for GRE tunnel offloading. */
-static inline void
-dp_packet_hwol_set_tunnel_gre(struct dp_packet *b)
-{
- *dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TUNNEL_GRE;
-}
-
/* Mark packet 'b' for csum offloading in outer IPv4 header. */
static inline void
dp_packet_hwol_set_tx_outer_ipv4_csum(struct dp_packet *b)
@@ -1292,7 +1281,7 @@ dp_packet_hwol_reset_tcp_seg(struct dp_packet *p)
ol_flags &= ~DP_PACKET_OL_TX_TCP_SEG;
p->offloads &= ~(DP_PACKET_OL_L4_CKSUM_GOOD | DP_PACKET_OL_IP_CKSUM_GOOD);
- if (dp_packet_hwol_is_tunnel(p)) {
+ if (dp_packet_tunnel(p)) {
ip_hdr = dp_packet_inner_l3(p);
} else {
ip_hdr = dp_packet_l3(p);
@@ -1301,14 +1290,14 @@ dp_packet_hwol_reset_tcp_seg(struct dp_packet *p)
ol_flags |= DP_PACKET_OL_TX_IP_CKSUM;
}
- if (dp_packet_hwol_is_tunnel(p)) {
+ if (dp_packet_tunnel(p)) {
ip_hdr = dp_packet_l3(p);
if (IP_VER(ip_hdr->ip_ihl_ver) == 4) {
ol_flags |= DP_PACKET_OL_TX_OUTER_IP_CKSUM;
}
- if (dp_packet_hwol_is_tunnel_geneve(p)
- || dp_packet_hwol_is_tunnel_vxlan(p)) {
+ if (dp_packet_tunnel_geneve(p)
+ || dp_packet_tunnel_vxlan(p)) {
ol_flags |= DP_PACKET_OL_TX_OUTER_UDP_CKSUM;
}
}
@@ -1363,7 +1352,7 @@ dp_packet_hwol_l3_csum_ipv4_ol(const struct dp_packet *b)
{
const struct ip_header *ip_hdr;
- if (dp_packet_hwol_is_tunnel(b)) {
+ if (dp_packet_tunnel(b)) {
ip_hdr = dp_packet_l3(b);
if (IP_VER(ip_hdr->ip_ihl_ver) == 4) {
return dp_packet_hwol_is_outer_ipv4_cksum(b);
@@ -1384,7 +1373,7 @@ dp_packet_hwol_l3_ipv4(const struct dp_packet *b)
{
const struct ip_header *ip_hdr;
- if (dp_packet_hwol_is_tunnel(b)) {
+ if (dp_packet_tunnel(b)) {
ip_hdr = dp_packet_l3(b);
return IP_VER(ip_hdr->ip_ihl_ver) == 4;
} else {
@@ -864,7 +864,7 @@ miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
/* Initialize packet's layer pointer and offsets. */
frame = data;
- tunneling = dp_packet_hwol_is_tunnel(packet);
+ tunneling = dp_packet_tunnel(packet);
if (tunneling) {
/* Preserve inner offsets from previous circulation. */
dp_packet_reset_outer_offsets(packet);
@@ -2662,13 +2662,8 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
const uint64_t all_outer_requests = (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
const uint64_t all_requests = all_inner_requests | all_outer_requests;
- const uint64_t all_outer_marks = RTE_MBUF_F_TX_TUNNEL_MASK;
- const uint64_t all_marks = all_outer_marks;
if (!(mbuf->ol_flags & all_requests)) {
- /* No offloads requested, no marks should be set. */
- mbuf->ol_flags &= ~all_marks;
-
uint64_t unexpected = mbuf->ol_flags & RTE_MBUF_F_TX_OFFLOAD_MASK;
if (OVS_UNLIKELY(unexpected)) {
VLOG_WARN_RL(&rl, "%s: Unexpected Tx offload flags: %#"PRIx64,
@@ -2680,25 +2675,23 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
return true;
}
- const uint64_t tunnel_type = mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
- if (OVS_UNLIKELY(tunnel_type &&
- tunnel_type != RTE_MBUF_F_TX_TUNNEL_GENEVE &&
- tunnel_type != RTE_MBUF_F_TX_TUNNEL_GRE &&
- tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN)) {
- VLOG_WARN_RL(&rl, "%s: Unexpected tunnel type: %#"PRIx64,
- netdev_get_name(&dev->up), tunnel_type);
- netdev_dpdk_mbuf_dump(netdev_get_name(&dev->up),
- "Packet with unexpected tunnel type", mbuf);
- return false;
- }
-
- if (tunnel_type && (mbuf->ol_flags & all_inner_requests)) {
+ if (dp_packet_tunnel(pkt)
+ && (mbuf->ol_flags & all_inner_requests)) {
if (mbuf->ol_flags & all_outer_requests) {
mbuf->outer_l2_len = (char *) dp_packet_l3(pkt) -
(char *) dp_packet_eth(pkt);
mbuf->outer_l3_len = (char *) dp_packet_l4(pkt) -
(char *) dp_packet_l3(pkt);
+ if (dp_packet_tunnel_geneve(pkt)) {
+ mbuf->ol_flags |= RTE_MBUF_F_TX_TUNNEL_GENEVE;
+ } else if (dp_packet_tunnel_vxlan(pkt)) {
+ mbuf->ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN;
+ } else {
+ ovs_assert(dp_packet_tunnel_gre(pkt));
+ mbuf->ol_flags |= RTE_MBUF_F_TX_TUNNEL_GRE;
+ }
+
ip = dp_packet_l3(pkt);
mbuf->ol_flags |= IP_VER(ip->ip_ihl_ver) == 4
? RTE_MBUF_F_TX_OUTER_IPV4
@@ -2709,8 +2702,6 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
l3 = dp_packet_inner_l3(pkt);
l4 = dp_packet_inner_l4(pkt);
} else {
- /* If no outer offloading is requested, clear outer marks. */
- mbuf->ol_flags &= ~all_outer_marks;
mbuf->outer_l2_len = 0;
mbuf->outer_l3_len = 0;
@@ -2720,7 +2711,7 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
l4 = dp_packet_inner_l4(pkt);
}
} else {
- if (tunnel_type) {
+ if (dp_packet_tunnel(pkt)) {
            /* No inner offload is requested, fall back to non-tunnel
             * checksum offloads. */
if (mbuf->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
@@ -2754,7 +2745,7 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
int hdr_len;
mbuf->l4_len = TCP_OFFSET(th->tcp_ctl) * 4;
- if (tunnel_type) {
+ if (dp_packet_tunnel(pkt)) {
link_tso_segsz = dev->mtu - mbuf->l2_len - mbuf->l3_len -
mbuf->l4_len - mbuf->outer_l3_len;
} else {
@@ -7236,9 +7236,9 @@ netdev_linux_prepend_vnet_hdr(struct dp_packet *b, int mtu)
bool l4_is_good = dp_packet_l4_checksum_good(b);
- if ((dp_packet_hwol_is_tunnel_vxlan(b) ||
- dp_packet_hwol_is_tunnel_geneve(b)) &&
- dp_packet_hwol_tx_l4_checksum(b)) {
+ if ((dp_packet_tunnel_vxlan(b)
+ || dp_packet_tunnel_geneve(b))
+ && dp_packet_hwol_tx_l4_checksum(b)) {
/* This condition is needed because dp-packet doesn't currently track
         * outer and inner checksum statuses separately. In the case of these
* two tunnel types we can end up setting outer l4 as good but still
@@ -210,7 +210,7 @@ netdev_tnl_push_ip_header(struct dp_packet *packet, const void *header,
ip = netdev_tnl_ip_hdr(eth);
ip->ip_tot_len = htons(*ip_tot_size);
/* Postpone checksum to when the packet is pushed to the port. */
- if (dp_packet_hwol_is_tunnel(packet)) {
+ if (dp_packet_tunnel(packet)) {
dp_packet_hwol_set_tx_outer_ipv4_csum(packet);
} else {
dp_packet_hwol_set_tx_ip_csum(packet);
@@ -287,12 +287,12 @@ dp_packet_tnl_ol_process(struct dp_packet *packet,
}
if (data->tnl_type == OVS_VPORT_TYPE_GENEVE) {
- dp_packet_hwol_set_tunnel_geneve(packet);
+ dp_packet_tunnel_set_geneve(packet);
} else if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
- dp_packet_hwol_set_tunnel_vxlan(packet);
+ dp_packet_tunnel_set_vxlan(packet);
} else if (data->tnl_type == OVS_VPORT_TYPE_GRE ||
data->tnl_type == OVS_VPORT_TYPE_IP6GRE) {
- dp_packet_hwol_set_tunnel_gre(packet);
+ dp_packet_tunnel_set_gre(packet);
}
}
@@ -320,8 +320,8 @@ netdev_tnl_push_udp_header(const struct netdev *netdev OVS_UNUSED,
if (udp->udp_csum) {
dp_packet_ol_reset_l4_csum_good(packet);
- if (dp_packet_hwol_is_tunnel_geneve(packet) ||
- dp_packet_hwol_is_tunnel_vxlan(packet)) {
+ if (dp_packet_tunnel_geneve(packet)
+ || dp_packet_tunnel_vxlan(packet)) {
dp_packet_hwol_set_outer_udp_csum(packet);
} else {
dp_packet_hwol_set_csum_udp(packet);
@@ -919,17 +919,17 @@ netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
NETDEV_TX_GRE_TNL_TSO |
NETDEV_TX_GENEVE_TNL_TSO))) {
DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
- if (dp_packet_hwol_is_tso(packet) &&
- dp_packet_hwol_is_tunnel(packet)) {
+ if (dp_packet_hwol_is_tso(packet)
+ && dp_packet_tunnel(packet)) {
return netdev_send_tso(netdev, qid, batch, concurrent_txq);
}
}
} else if (!(netdev_flags & NETDEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
- if (dp_packet_hwol_is_tso(packet) &&
- (dp_packet_hwol_is_tunnel_vxlan(packet) ||
- dp_packet_hwol_is_tunnel_geneve(packet)) &&
- dp_packet_hwol_is_outer_udp_cksum(packet)) {
+ if (dp_packet_hwol_is_tso(packet)
+ && (dp_packet_tunnel_vxlan(packet)
+ || dp_packet_tunnel_geneve(packet))
+ && dp_packet_hwol_is_outer_udp_cksum(packet)) {
return netdev_send_tso(netdev, qid, batch, concurrent_txq);
}
}
@@ -1025,7 +1025,7 @@ netdev_push_header(const struct netdev *netdev,
data->tnl_type != OVS_VPORT_TYPE_GRE &&
data->tnl_type != OVS_VPORT_TYPE_IP6GRE) {
dp_packet_ol_send_prepare(packet, 0);
- } else if (dp_packet_hwol_is_tunnel(packet)) {
+ } else if (dp_packet_tunnel(packet)) {
if (dp_packet_hwol_is_tso(packet)) {
COVERAGE_INC(netdev_push_header_drops);
dp_packet_delete(packet);
Rather than setting bits in the mbuf ol_flags field, which only makes
sense for netdev-dpdk ports, mark the packet for tunnel offload in the
OVS offloads API.

While at it, since there is nothing really "hardware" related, rename
the current API for consistency with the dp_packet_tunnel_ prefix.

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
Changes since v1:
- renamed dp_packet_is_tunnel to dp_packet_tunnel,

---
 lib/dp-packet-gso.c     |   8 +--
 lib/dp-packet.c         |   6 +-
 lib/dp-packet.h         | 131 ++++++++++++++++++----------------------
 lib/flow.c              |   2 +-
 lib/netdev-dpdk.c       |  35 ++++-------
 lib/netdev-linux.c      |   6 +-
 lib/netdev-native-tnl.c |  12 ++--
 lib/netdev.c            |  14 ++---
 8 files changed, 97 insertions(+), 117 deletions(-)
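For illustration, here is a minimal standalone sketch of the tunnel bit
encoding this patch introduces.  It is not part of the patch: struct
dp_packet is reduced to the single 'offloads' field these helpers touch,
and only the names defined above are assumed.  GENEVE and VXLAN each
claim one bit, while GRE is represented by both bits set, so the two-bit
mask doubles as the GRE value.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-in for the real struct dp_packet. */
struct dp_packet {
    uint16_t offloads;
};

#define DP_PACKET_OL_TUNNEL_GENEVE (UINT16_C(1) << 11)
#define DP_PACKET_OL_TUNNEL_VXLAN  (UINT16_C(1) << 12)
#define DP_PACKET_OL_TUNNEL_MASK \
    (DP_PACKET_OL_TUNNEL_GENEVE | DP_PACKET_OL_TUNNEL_VXLAN)

/* GRE is the only type that sets both bits. */
static bool
dp_packet_tunnel_gre(const struct dp_packet *b)
{
    return (b->offloads & DP_PACKET_OL_TUNNEL_MASK)
           == DP_PACKET_OL_TUNNEL_MASK;
}

static bool
dp_packet_tunnel(const struct dp_packet *b)
{
    return (b->offloads & DP_PACKET_OL_TUNNEL_MASK) != 0;
}

int
main(void)
{
    struct dp_packet p = { .offloads = 0 };

    assert(!dp_packet_tunnel(&p));

    /* Mark as GRE by setting both bits, as dp_packet_tunnel_set_gre()
     * does. */
    p.offloads |= DP_PACKET_OL_TUNNEL_MASK;
    assert(dp_packet_tunnel(&p) && dp_packet_tunnel_gre(&p));

    /* Dropping the VXLAN bit leaves a GENEVE-marked packet, which is
     * no longer confused with GRE. */
    p.offloads &= ~DP_PACKET_OL_TUNNEL_VXLAN;
    assert(dp_packet_tunnel(&p) && !dp_packet_tunnel_gre(&p));

    printf("tunnel bit encoding OK\n");
    return 0;
}

Encoding three tunnel types in two bits avoids burning a third offload
flag, and it is safe because a packet carries at most one tunnel type at
a time; this is also why the setters clear the other type's bit before
setting their own.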