@@ -2095,7 +2095,7 @@ conn_key_extract(struct conntrack *ct, struct dp_packet *pkt, ovs_be16 dl_type,
COVERAGE_INC(conntrack_l3csum_err);
} else {
bool hwol_good_l3_csum = dp_packet_ip_csum_good(pkt)
- || dp_packet_hwol_is_ipv4(pkt);
+ || dp_packet_ol_is_ipv4(pkt);
/* Validate the checksum only when hwol is not supported. */
ok = extract_l3_ipv4(&ctx->key, l3, dp_packet_l3_size(pkt), NULL,
!hwol_good_l3_csum);
@@ -2110,7 +2110,7 @@ conn_key_extract(struct conntrack *ct, struct dp_packet *pkt, ovs_be16 dl_type,
bool hwol_bad_l4_csum = dp_packet_l4_csum_bad(pkt);
if (!hwol_bad_l4_csum) {
bool hwol_good_l4_csum = dp_packet_l4_csum_good(pkt)
- || dp_packet_hwol_tx_l4_csum(pkt);
+ || dp_packet_ol_tx_l4_csum(pkt);
/* Validate the checksum only when hwol is not supported. */
if (extract_l4(&ctx->key, l4, dp_packet_l4_size(pkt),
&ctx->icmp_related, l3, !hwol_good_l4_csum,
@@ -3402,7 +3402,7 @@ handle_ftp_ctl(struct conntrack *ct, const struct conn_lookup_ctx *ctx,
}
if (seq_skew) {
ip_len = ntohs(l3_hdr->ip_tot_len) + seq_skew;
- if (!dp_packet_hwol_is_ipv4(pkt)) {
+ if (!dp_packet_ol_is_ipv4(pkt)) {
l3_hdr->ip_csum = recalc_csum16(l3_hdr->ip_csum,
l3_hdr->ip_tot_len,
htons(ip_len));
@@ -3424,7 +3424,7 @@ handle_ftp_ctl(struct conntrack *ct, const struct conn_lookup_ctx *ctx,
}
th->tcp_csum = 0;
- if (!dp_packet_hwol_tx_l4_csum(pkt)) {
+ if (!dp_packet_ol_tx_l4_csum(pkt)) {
if (ctx->key.dl_type == htons(ETH_TYPE_IPV6)) {
th->tcp_csum = packet_csum_upperlayer6(nh6, th, ctx->key.nw_proto,
dp_packet_l4_size(pkt));
@@ -958,35 +958,35 @@ dp_packet_set_flow_mark(struct dp_packet *p, uint32_t mark)
/* Returns the L4 cksum offload bitmask. */
static inline uint64_t
-dp_packet_hwol_l4_mask(const struct dp_packet *b)
+dp_packet_ol_l4_mask(const struct dp_packet *b)
{
return *dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_L4_MASK;
}
/* Return true if the packet 'b' requested L4 checksum offload. */
static inline bool
-dp_packet_hwol_tx_l4_csum(const struct dp_packet *b)
+dp_packet_ol_tx_l4_csum(const struct dp_packet *b)
{
- return !!dp_packet_hwol_l4_mask(b);
+ return !!dp_packet_ol_l4_mask(b);
}
/* Returns 'true' if packet 'b' is marked for TCP segmentation offloading. */
static inline bool
-dp_packet_hwol_is_tso(const struct dp_packet *b)
+dp_packet_ol_is_tso(const struct dp_packet *b)
{
return !!(*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_TCP_SEG);
}
/* Returns 'true' if packet 'b' is marked for IPv4 checksum offloading. */
static inline bool
-dp_packet_hwol_is_ipv4(const struct dp_packet *b)
+dp_packet_ol_is_ipv4(const struct dp_packet *b)
{
return !!(*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_IPV4);
}
/* Returns 'true' if packet 'b' is marked for TCP checksum offloading. */
static inline bool
-dp_packet_hwol_l4_is_tcp(const struct dp_packet *b)
+dp_packet_ol_l4_is_tcp(const struct dp_packet *b)
{
return (*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_L4_MASK) ==
DP_PACKET_OL_TX_TCP_CSUM;
@@ -994,7 +994,7 @@ dp_packet_hwol_l4_is_tcp(const struct dp_packet *b)
/* Returns 'true' if packet 'b' is marked for UDP checksum offloading. */
static inline bool
-dp_packet_hwol_l4_is_udp(struct dp_packet *b)
+dp_packet_ol_l4_is_udp(struct dp_packet *b)
{
return (*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_L4_MASK) ==
DP_PACKET_OL_TX_UDP_CSUM;
@@ -1002,7 +1002,7 @@ dp_packet_hwol_l4_is_udp(struct dp_packet *b)
/* Returns 'true' if packet 'b' is marked for SCTP checksum offloading. */
static inline bool
-dp_packet_hwol_l4_is_sctp(struct dp_packet *b)
+dp_packet_ol_l4_is_sctp(struct dp_packet *b)
{
return (*dp_packet_ol_flags_ptr(b) & DP_PACKET_OL_TX_L4_MASK) ==
DP_PACKET_OL_TX_SCTP_CSUM;
@@ -1010,14 +1010,14 @@ dp_packet_hwol_l4_is_sctp(struct dp_packet *b)
/* Mark packet 'b' for IPv4 checksum offloading. */
static inline void
-dp_packet_hwol_set_tx_ipv4(struct dp_packet *b)
+dp_packet_ol_set_tx_ipv4(struct dp_packet *b)
{
*dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_IPV4;
}
/* Mark packet 'b' for IPv6 checksum offloading. */
static inline void
-dp_packet_hwol_set_tx_ipv6(struct dp_packet *b)
+dp_packet_ol_set_tx_ipv6(struct dp_packet *b)
{
*dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_IPV6;
}
@@ -1025,7 +1025,7 @@ dp_packet_hwol_set_tx_ipv6(struct dp_packet *b)
/* Mark packet 'b' for TCP checksum offloading. It implies that either
* the packet 'b' is marked for IPv4 or IPv6 checksum offloading. */
static inline void
-dp_packet_hwol_set_csum_tcp(struct dp_packet *b)
+dp_packet_ol_set_csum_tcp(struct dp_packet *b)
{
*dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TCP_CSUM;
}
@@ -1033,7 +1033,7 @@ dp_packet_hwol_set_csum_tcp(struct dp_packet *b)
/* Mark packet 'b' for UDP checksum offloading. It implies that either
* the packet 'b' is marked for IPv4 or IPv6 checksum offloading. */
static inline void
-dp_packet_hwol_set_csum_udp(struct dp_packet *b)
+dp_packet_ol_set_csum_udp(struct dp_packet *b)
{
*dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_UDP_CSUM;
}
@@ -1041,7 +1041,7 @@ dp_packet_hwol_set_csum_udp(struct dp_packet *b)
/* Mark packet 'b' for SCTP checksum offloading. It implies that either
* the packet 'b' is marked for IPv4 or IPv6 checksum offloading. */
static inline void
-dp_packet_hwol_set_csum_sctp(struct dp_packet *b)
+dp_packet_ol_set_csum_sctp(struct dp_packet *b)
{
*dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_SCTP_CSUM;
}
@@ -1050,7 +1050,7 @@ dp_packet_hwol_set_csum_sctp(struct dp_packet *b)
* either the packet 'b' is marked for IPv4 or IPv6 checksum offloading
* and also for TCP checksum offloading. */
static inline void
-dp_packet_hwol_set_tcp_seg(struct dp_packet *b)
+dp_packet_ol_set_tcp_seg(struct dp_packet *b)
{
*dp_packet_ol_flags_ptr(b) |= DP_PACKET_OL_TX_TCP_SEG;
}
@@ -433,7 +433,7 @@ ipf_reassemble_v4_frags(struct ipf_list *ipf_list)
len += rest_len;
l3 = dp_packet_l3(pkt);
ovs_be16 new_ip_frag_off = l3->ip_frag_off & ~htons(IP_MORE_FRAGMENTS);
- if (!dp_packet_hwol_is_ipv4(pkt)) {
+ if (!dp_packet_ol_is_ipv4(pkt)) {
l3->ip_csum = recalc_csum16(l3->ip_csum, l3->ip_frag_off,
new_ip_frag_off);
l3->ip_csum = recalc_csum16(l3->ip_csum, l3->ip_tot_len, htons(len));
@@ -609,7 +609,7 @@ ipf_is_valid_v4_frag(struct ipf *ipf, struct dp_packet *pkt)
}
if (OVS_UNLIKELY(!dp_packet_ip_csum_good(pkt)
- && !dp_packet_hwol_is_ipv4(pkt)
+ && !dp_packet_ol_is_ipv4(pkt)
&& csum(l3, ip_hdr_len) != 0)) {
COVERAGE_INC(ipf_l3csum_err);
goto invalid_pkt;
@@ -1185,7 +1185,7 @@ ipf_post_execute_reass_pkts(struct ipf *ipf,
} else {
struct ip_header *l3_frag = dp_packet_l3(frag_i->pkt);
struct ip_header *l3_reass = dp_packet_l3(pkt);
- if (!dp_packet_hwol_is_ipv4(frag_i->pkt)) {
+ if (!dp_packet_ol_is_ipv4(frag_i->pkt)) {
ovs_be32 reass_ip =
get_16aligned_be32(&l3_reass->ip_src);
ovs_be32 frag_ip =
@@ -205,7 +205,7 @@ struct netdev_dpdk_sw_stats {
/* Packet drops in ingress policer processing. */
uint64_t rx_qos_drops;
/* Packet drops in HWOL processing. */
- uint64_t tx_invalid_hwol_drops;
+ uint64_t tx_invalid_ol_drops;
};
enum dpdk_dev_type {
@@ -2161,7 +2161,7 @@ netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
/* Prepare the packet for HWOL.
* Return True if the packet is OK to continue. */
static bool
-netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
+netdev_dpdk_prep_ol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
{
struct dp_packet *pkt = CONTAINER_OF(mbuf, struct dp_packet, mbuf);
@@ -2195,7 +2195,7 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
/* Prepare a batch for HWOL.
* Return the number of good packets in the batch. */
static int
-netdev_dpdk_prep_hwol_batch(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
+netdev_dpdk_prep_ol_batch(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
int pkt_cnt)
{
int i = 0;
@@ -2205,7 +2205,7 @@ netdev_dpdk_prep_hwol_batch(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
/* Prepare and filter bad HWOL packets. */
for (i = 0; i < pkt_cnt; i++) {
pkt = pkts[i];
- if (!netdev_dpdk_prep_hwol_packet(dev, pkt)) {
+ if (!netdev_dpdk_prep_ol_packet(dev, pkt)) {
rte_pktmbuf_free(pkt);
continue;
}
@@ -2559,7 +2559,7 @@ netdev_dpdk_vhost_update_tx_counters(struct netdev_dpdk *dev,
int dropped = sw_stats_add->tx_mtu_exceeded_drops +
sw_stats_add->tx_qos_drops +
sw_stats_add->tx_failure_drops +
- sw_stats_add->tx_invalid_hwol_drops;
+ sw_stats_add->tx_invalid_ol_drops;
struct netdev_stats *stats = &dev->stats;
int sent = attempted - dropped;
int i;
@@ -2578,7 +2578,7 @@ netdev_dpdk_vhost_update_tx_counters(struct netdev_dpdk *dev,
sw_stats->tx_failure_drops += sw_stats_add->tx_failure_drops;
sw_stats->tx_mtu_exceeded_drops += sw_stats_add->tx_mtu_exceeded_drops;
sw_stats->tx_qos_drops += sw_stats_add->tx_qos_drops;
- sw_stats->tx_invalid_hwol_drops += sw_stats_add->tx_invalid_hwol_drops;
+ sw_stats->tx_invalid_ol_drops += sw_stats_add->tx_invalid_ol_drops;
}
}
@@ -2610,12 +2610,12 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
}
- sw_stats_add.tx_invalid_hwol_drops = cnt;
+ sw_stats_add.tx_invalid_ol_drops = cnt;
if (userspace_tso_enabled()) {
- cnt = netdev_dpdk_prep_hwol_batch(dev, cur_pkts, cnt);
+ cnt = netdev_dpdk_prep_ol_batch(dev, cur_pkts, cnt);
}
- sw_stats_add.tx_invalid_hwol_drops -= cnt;
+ sw_stats_add.tx_invalid_ol_drops -= cnt;
sw_stats_add.tx_mtu_exceeded_drops = cnt;
cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
sw_stats_add.tx_mtu_exceeded_drops -= cnt;
@@ -2887,7 +2887,7 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
hwol_drops = batch_cnt;
if (userspace_tso_enabled()) {
- batch_cnt = netdev_dpdk_prep_hwol_batch(dev, pkts, batch_cnt);
+ batch_cnt = netdev_dpdk_prep_ol_batch(dev, pkts, batch_cnt);
}
hwol_drops -= batch_cnt;
mtu_drops = batch_cnt;
@@ -2906,7 +2906,7 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
sw_stats->tx_failure_drops += tx_failure;
sw_stats->tx_mtu_exceeded_drops += mtu_drops;
sw_stats->tx_qos_drops += qos_drops;
- sw_stats->tx_invalid_hwol_drops += hwol_drops;
+ sw_stats->tx_invalid_ol_drops += hwol_drops;
rte_spinlock_unlock(&dev->stats_lock);
}
}
@@ -3249,7 +3249,7 @@ netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
SW_CSTAT(tx_mtu_exceeded_drops) \
SW_CSTAT(tx_qos_drops) \
SW_CSTAT(rx_qos_drops) \
- SW_CSTAT(tx_invalid_hwol_drops)
+ SW_CSTAT(tx_invalid_ol_drops)
#define SW_CSTAT(NAME) + 1
custom_stats->size = SW_CSTATS;
@@ -6601,7 +6601,7 @@ netdev_linux_parse_l2(struct dp_packet *b, uint16_t *l4proto)
}
*l4proto = ip_hdr->ip_proto;
- dp_packet_hwol_set_tx_ipv4(b);
+ dp_packet_ol_set_tx_ipv4(b);
} else if (eth_type == htons(ETH_TYPE_IPV6)) {
struct ovs_16aligned_ip6_hdr *nh6;
@@ -6611,7 +6611,7 @@ netdev_linux_parse_l2(struct dp_packet *b, uint16_t *l4proto)
}
*l4proto = nh6->ip6_ctlun.ip6_un1.ip6_un1_nxt;
- dp_packet_hwol_set_tx_ipv6(b);
+ dp_packet_ol_set_tx_ipv6(b);
}
return 0;
@@ -6637,11 +6637,11 @@ netdev_linux_parse_vnet_hdr(struct dp_packet *b)
if (vnet->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
if (l4proto == IPPROTO_TCP) {
- dp_packet_hwol_set_csum_tcp(b);
+ dp_packet_ol_set_csum_tcp(b);
} else if (l4proto == IPPROTO_UDP) {
- dp_packet_hwol_set_csum_udp(b);
+ dp_packet_ol_set_csum_udp(b);
} else if (l4proto == IPPROTO_SCTP) {
- dp_packet_hwol_set_csum_sctp(b);
+ dp_packet_ol_set_csum_sctp(b);
}
}
@@ -6653,7 +6653,7 @@ netdev_linux_parse_vnet_hdr(struct dp_packet *b)
if (type == VIRTIO_NET_HDR_GSO_TCPV4
|| type == VIRTIO_NET_HDR_GSO_TCPV6) {
- dp_packet_hwol_set_tcp_seg(b);
+ dp_packet_ol_set_tcp_seg(b);
}
}
@@ -6665,13 +6665,13 @@ netdev_linux_prepend_vnet_hdr(struct dp_packet *b, int mtu)
{
struct virtio_net_hdr *vnet = dp_packet_push_zeros(b, sizeof *vnet);
- if (dp_packet_hwol_is_tso(b)) {
+ if (dp_packet_ol_is_tso(b)) {
uint16_t hdr_len = ((char *)dp_packet_l4(b) - (char *)dp_packet_eth(b))
+ TCP_HEADER_LEN;
vnet->hdr_len = (OVS_FORCE __virtio16)hdr_len;
vnet->gso_size = (OVS_FORCE __virtio16)(mtu - hdr_len);
- if (dp_packet_hwol_is_ipv4(b)) {
+ if (dp_packet_ol_is_ipv4(b)) {
vnet->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
} else {
vnet->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
@@ -6681,18 +6681,18 @@ netdev_linux_prepend_vnet_hdr(struct dp_packet *b, int mtu)
vnet->flags = VIRTIO_NET_HDR_GSO_NONE;
}
- if (dp_packet_hwol_l4_mask(b)) {
+ if (dp_packet_ol_l4_mask(b)) {
vnet->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
vnet->csum_start = (OVS_FORCE __virtio16)((char *)dp_packet_l4(b)
- (char *)dp_packet_eth(b));
- if (dp_packet_hwol_l4_is_tcp(b)) {
+ if (dp_packet_ol_l4_is_tcp(b)) {
vnet->csum_offset = (OVS_FORCE __virtio16) __builtin_offsetof(
struct tcp_header, tcp_csum);
- } else if (dp_packet_hwol_l4_is_udp(b)) {
+ } else if (dp_packet_ol_l4_is_udp(b)) {
vnet->csum_offset = (OVS_FORCE __virtio16) __builtin_offsetof(
struct udp_header, udp_csum);
- } else if (dp_packet_hwol_l4_is_sctp(b)) {
+ } else if (dp_packet_ol_l4_is_sctp(b)) {
vnet->csum_offset = (OVS_FORCE __virtio16) __builtin_offsetof(
struct sctp_header, sctp_csum);
} else {
@@ -794,28 +794,28 @@ netdev_send_prepare_packet(const uint64_t netdev_flags,
{
uint64_t l4_mask;
- if (dp_packet_hwol_is_tso(packet)
+ if (dp_packet_ol_is_tso(packet)
&& !(netdev_flags & NETDEV_OFFLOAD_TX_TCP_TSO)) {
/* Fall back to GSO in software. */
VLOG_ERR_BUF(errormsg, "No TSO support");
return false;
}
- l4_mask = dp_packet_hwol_l4_mask(packet);
+ l4_mask = dp_packet_ol_l4_mask(packet);
if (l4_mask) {
- if (dp_packet_hwol_l4_is_tcp(packet)) {
+ if (dp_packet_ol_l4_is_tcp(packet)) {
if (!(netdev_flags & NETDEV_OFFLOAD_TX_TCP_CSUM)) {
/* Fall back to TCP csum in software. */
VLOG_ERR_BUF(errormsg, "No TCP checksum support");
return false;
}
- } else if (dp_packet_hwol_l4_is_udp(packet)) {
+ } else if (dp_packet_ol_l4_is_udp(packet)) {
if (!(netdev_flags & NETDEV_OFFLOAD_TX_UDP_CSUM)) {
/* Fall back to UDP csum in software. */
VLOG_ERR_BUF(errormsg, "No UDP checksum support");
return false;
}
- } else if (dp_packet_hwol_l4_is_sctp(packet)) {
+ } else if (dp_packet_ol_l4_is_sctp(packet)) {
if (!(netdev_flags & NETDEV_OFFLOAD_TX_SCTP_CSUM)) {
/* Fall back to SCTP csum in software. */
VLOG_ERR_BUF(errormsg, "No SCTP checksum support");
@@ -960,8 +960,8 @@ netdev_push_header(const struct netdev *netdev,
size_t i, size = dp_packet_batch_size(batch);
DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
- if (OVS_UNLIKELY(dp_packet_hwol_is_tso(packet)
- || dp_packet_hwol_l4_mask(packet))) {
+ if (OVS_UNLIKELY(dp_packet_ol_is_tso(packet)
+ || dp_packet_ol_l4_mask(packet))) {
COVERAGE_INC(netdev_push_header_drops);
dp_packet_delete(packet);
VLOG_WARN_RL(&rl, "%s: Tunneling packets with HW offload flags is "
The new names correlate better with the flag names. Signed-off-by: Flavio Leitner <fbl@sysclose.org> --- lib/conntrack.c | 8 ++++---- lib/dp-packet.h | 28 ++++++++++++++-------------- lib/ipf.c | 6 +++--- lib/netdev-dpdk.c | 24 ++++++++++++------------ lib/netdev-linux.c | 24 ++++++++++++------------ lib/netdev.c | 14 +++++++------- 6 files changed, 52 insertions(+), 52 deletions(-)