@@ -6330,18 +6330,17 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
if ((mss = skb_shinfo(skb)->gso_size)) {
- u32 tcp_opt_len;
- struct iphdr *iph;
+ struct tcphdr *th = tcp_hdr(skb);
+ int tcp_opt_words = th->doff - (sizeof(*th) >> 2);
+ /* tcp_opt_words goes negative when th->doff < 5 (malformed header); not validated here */
vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
- tcp_opt_len = tcp_optlen(skb);
-
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
u32 tcp_off = skb_transport_offset(skb) -
sizeof(struct ipv6hdr) - ETH_HLEN;
- vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
+ vlan_tag_flags |= (tcp_opt_words << 8) |
TX_BD_FLAGS_SW_FLAGS;
if (likely(tcp_off == 0))
vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
@@ -6354,10 +6353,15 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
}
} else {
- iph = ip_hdr(skb);
- if (tcp_opt_len || (iph->ihl > 5)) {
- vlan_tag_flags |= ((iph->ihl - 5) +
- (tcp_opt_len >> 2)) << 8;
+ struct iphdr *iph = ip_hdr(skb);
+ int ip_opt_words = iph->ihl - (sizeof(*iph) >> 2);
+ int opt_words;
+
+ /* ip_opt_words goes negative when iph->ihl < 5 (malformed header); not validated here */
+ opt_words = ip_opt_words + tcp_opt_words;
+
+ if (opt_words > 0) {
+ vlan_tag_flags |= opt_words << 8;
}
}
} else
@@ -5230,7 +5230,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
else {
struct iphdr *iph = ip_hdr(skb);
- tcp_opt_len = tcp_optlen(skb);
+ tcp_opt_len = tcp_option_len_th(tcp_hdr(skb));
+ /* tcp_opt_len goes negative when doff < 5 (malformed header); not validated here */
ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
iph->check = 0;
@@ -5392,7 +5393,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
mss = 0;
if ((mss = skb_shinfo(skb)->gso_size) != 0) {
struct iphdr *iph;
- int tcp_opt_len, ip_tcp_len, hdr_len;
+ int tcp_opt_len, ip_hdr_len, ip_opt_len, ip_tcp_len, hdr_len;
+ int opt_bytes;
if (skb_header_cloned(skb) &&
pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5400,10 +5402,12 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
goto out_unlock;
}
- tcp_opt_len = tcp_optlen(skb);
- ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
-
+ tcp_opt_len = tcp_option_len_th(tcp_hdr(skb));
+ /* tcp_opt_len goes negative when doff < 5 (malformed header); not validated here */
+ ip_hdr_len = ip_hdrlen(skb);
+ ip_tcp_len = ip_hdr_len + sizeof(struct tcphdr);
hdr_len = ip_tcp_len + tcp_opt_len;
+
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
(tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
return (tg3_tso_bug(tp, skb));
@@ -5423,20 +5427,18 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
IPPROTO_TCP,
0);
+ ip_opt_len = ip_hdr_len - sizeof(struct iphdr);
+ /* ip_opt_len goes negative when ip_hdrlen(skb) < sizeof(struct iphdr); not validated here */
+ opt_bytes = ip_opt_len + tcp_opt_len;
+
if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
- if (tcp_opt_len || iph->ihl > 5) {
- int tsflags;
-
- tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
- mss |= (tsflags << 11);
+ if (opt_bytes > 0) {
+ mss |= (opt_bytes >> 2) << 11;
}
} else {
- if (tcp_opt_len || iph->ihl > 5) {
- int tsflags;
-
- tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
- base_flags |= tsflags << 12;
+ if (opt_bytes > 0) {
+ base_flags |= (opt_bytes >> 2) << 12;
}
}
}
@@ -217,9 +217,15 @@ static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
return tcp_hdr(skb)->doff * 4;
}
-static inline unsigned int tcp_optlen(const struct sk_buff *skb)
+static inline unsigned int tcp_header_len_th(const struct tcphdr *th)
{
- return (tcp_hdr(skb)->doff - 5) * 4;
+ return th->doff * 4;
+}
+
+/* Returns a negative value when th->doff < 5, i.e. a header length below sizeof(struct tcphdr). */
+static inline int tcp_option_len_th(const struct tcphdr *th)
+{
+ return (int)tcp_header_len_th(th) - sizeof(*th);
}
/* This defines a selective acknowledgement block. */