[06/14] bna: TX Path and RX Path Changes

Message ID: 1313529591-3718-7-git-send-email-rmody@brocade.com
State: Changes Requested, archived
Delegated to: David Miller

Commit Message

Rasesh Mody Aug. 16, 2011, 9:19 p.m. UTC
Change details:
 - Disable and enable interrupts from the same polling context to prevent
   reordering in Rx path.
 - Add Rx NAPI debug counters.
 - Make NAPI budget check more generic
 - Add a macro bnad_dim_timer_stop for DIM(Dynamic Interrupt Moderation)
   timer stop
 - Handle reduced MSI-X vectors case in bnad_enable_msix
 - Replace existing checks with macros and add more checks for illegal skbs
   in transmit path. Add more tx_skb counters for dropped skbs.
 - Check for single frame TSO skbs and send them out as non-TSO.
 - Put memory barrier after bna_txq_prod_indx_doorbell()

Signed-off-by: Gurunatha Karaje <gkaraje@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 drivers/net/ethernet/brocade/bna/bnad.c |  207 +++++++++++++++++--------------
 drivers/net/ethernet/brocade/bna/bnad.h |   33 +++++-
 2 files changed, 148 insertions(+), 92 deletions(-)
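
For context on the first bullet: the fix moves the Rx interrupt disable/enable pair so that both halves happen from the NAPI polling context instead of being split between the hard-irq handler and the poll routine. A minimal sketch of the resulting pattern, built around the helpers this patch actually uses (bna_ib_ack_disable_irq() inside bnad_poll_cq(), bnad_enable_rx_irq_unsafe() on completion); the surrounding scaffolding is illustrative, not the driver's exact code:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
/* struct bnad, struct bna_ccb, struct bnad_rx_ctrl come from bnad.h */

/* Hard-irq handler: only schedules NAPI. It no longer masks the
 * interrupt itself, so no disable can be reordered against the
 * re-enable done later from the polling context. */
static irqreturn_t bnad_msix_rx_sketch(int irq, void *data)
{
	struct bna_ccb *ccb = data;

	if (ccb)
		napi_schedule(&((struct bnad_rx_ctrl *)ccb->ctrl)->napi);
	return IRQ_HANDLED;
}

static int bnad_napi_poll_rx_sketch(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	int rcvd;

	/* bnad_poll_cq() acks completions with bna_ib_ack_disable_irq(),
	 * leaving the Rx interrupt masked while polling continues. */
	rcvd = bnad_poll_cq(rx_ctrl->bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;	/* stay scheduled; irq stays masked */

	napi_complete(napi);
	/* Re-enable from the same polling context, so the disable and
	 * the enable are strictly ordered with respect to each other. */
	bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
	return rcvd;
}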

Comments

Ben Hutchings Aug. 16, 2011, 9:45 p.m. UTC | #1
On Tue, 2011-08-16 at 14:19 -0700, Rasesh Mody wrote:
> Change details:
>  - Disable and enable interrupts from the same polling context to prevent
>    reordering in Rx path.
>  - Add Rx NAPI debug counters.
>  - Make NAPI budget check more generic
>  - Add a macro bnad_dim_timer_stop for DIM(Dynamic Interrupt Moderation)
>    timer stop
>  - Handle reduced MSI-X vectors case in bnad_enable_msix
>  - Replace existing checks with macros and add more checks for illegal skbs
>    in transmit path. Add more tx_skb counters for dropped skbs.
>  - Check for single frame TSO skbs and send them out as non-TSO.
>  - Put memory barrier after bna_txq_prod_indx_doorbell()
> 
> Signed-off-by: Gurunatha Karaje <gkaraje@brocade.com>
> Signed-off-by: Rasesh Mody <rmody@brocade.com>
> ---
>  drivers/net/ethernet/brocade/bna/bnad.c |  207 +++++++++++++++++--------------
>  drivers/net/ethernet/brocade/bna/bnad.h |   33 +++++-
>  2 files changed, 148 insertions(+), 92 deletions(-)
> 
> diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
> index 64e2106..0faa8a1 100644
> --- a/drivers/net/ethernet/brocade/bna/bnad.c
> +++ b/drivers/net/ethernet/brocade/bna/bnad.c
> @@ -532,7 +532,7 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
>  		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
>  			skb->ip_summed = CHECKSUM_UNNECESSARY;
>  		else
> -			skb_checksum_none_assert(skb);
> +			skb->ip_summed = CHECKSUM_NONE;
>  
>  		rcb->rxq->rx_packets++;
>  		rcb->rxq->rx_bytes += skb->len;
[...]

This is reverting part of:

commit bc8acf2c8c3e43fcc192762a9f964b3e9a17748b
Author: Eric Dumazet <eric.dumazet@gmail.com>
Date:   Thu Sep 2 13:07:41 2010 -0700

    drivers/net: avoid some skb->ip_summed initializations

and I don't see any justification for that.

Ben.
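
For reference, the helper this hunk removes was added by the commit cited above and is essentially a debug-only assertion: freshly allocated skbs are zeroed, and CHECKSUM_NONE is 0, so the explicit store is redundant. The definition in include/linux/skbuff.h (as of that commit) is:

static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

Replacing the assert with "skb->ip_summed = CHECKSUM_NONE;" reinstates the redundant write, which is why the hunk reads as an unjustified partial revert.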
Rasesh Mody Aug. 16, 2011, 11:28 p.m. UTC | #2
>From: Ben Hutchings [mailto:bhutchings@solarflare.com]
>Sent: Tuesday, August 16, 2011 2:46 PM
>
>On Tue, 2011-08-16 at 14:19 -0700, Rasesh Mody wrote:
>> Change details:
>>  - Disable and enable interrupts from the same polling context to prevent
>>    reordering in Rx path.
>>  - Add Rx NAPI debug counters.
>>  - Make NAPI budget check more generic
>>  - Add a macro bnad_dim_timer_stop for DIM(Dynamic Interrupt Moderation)
>>    timer stop
>>  - Handle reduced MSI-X vectors case in bnad_enable_msix
>>  - Replace existing checks with macros and add more checks for illegal skbs
>>    in transmit path. Add more tx_skb counters for dropped skbs.
>>  - Check for single frame TSO skbs and send them out as non-TSO.
>>  - Put memory barrier after bna_txq_prod_indx_doorbell()
>>
>> Signed-off-by: Gurunatha Karaje <gkaraje@brocade.com>
>> Signed-off-by: Rasesh Mody <rmody@brocade.com>
>> ---
>>  drivers/net/ethernet/brocade/bna/bnad.c |  207 +++++++++++++++++--------------
>>  drivers/net/ethernet/brocade/bna/bnad.h |   33 +++++-
>>  2 files changed, 148 insertions(+), 92 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
>> index 64e2106..0faa8a1 100644
>> --- a/drivers/net/ethernet/brocade/bna/bnad.c
>> +++ b/drivers/net/ethernet/brocade/bna/bnad.c
>> @@ -532,7 +532,7 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
>>  		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
>>  			skb->ip_summed = CHECKSUM_UNNECESSARY;
>>  		else
>> -			skb_checksum_none_assert(skb);
>> +			skb->ip_summed = CHECKSUM_NONE;
>>
>>  		rcb->rxq->rx_packets++;
>>  		rcb->rxq->rx_bytes += skb->len;
>[...]
>
>This is reverting part of:
>
>commit bc8acf2c8c3e43fcc192762a9f964b3e9a17748b
>Author: Eric Dumazet <eric.dumazet@gmail.com>
>Date:   Thu Sep 2 13:07:41 2010 -0700
>
>    drivers/net: avoid some skb->ip_summed initializations
>
>and I don't see any justification for that.

It is an erroneous change; it will be dropped when we resubmit this
patch set.
Thanks,
Rasesh
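
One reading aid for the Tx-path hunks below: they depend on BNAD_DROP_AND_RETURN() / BNAD_DROP_AND_RETURN_IF() macros whose definitions are not visible in the posted diff. Inferred from the call sites in bnad_start_xmit(), they plausibly expand to something like the following hypothetical sketch (a guess, not the driver's actual definition):

/* Hypothetical expansion, inferred from call sites: free the skb,
 * bump the named tx_skb drop counter, and return NETDEV_TX_OK so the
 * stack does not requeue the packet. Assumes the enclosing function
 * has 'skb' and 'bnad' in scope, as bnad_start_xmit() does. */
#define BNAD_DROP_AND_RETURN(_counter)			\
do {							\
	dev_kfree_skb(skb);				\
	BNAD_UPDATE_CTR(bnad, _counter);		\
	return NETDEV_TX_OK;				\
} while (0)

#define BNAD_DROP_AND_RETURN_IF(_cond, _counter)	\
do {							\
	if (unlikely(_cond))				\
		BNAD_DROP_AND_RETURN(_counter);		\
} while (0)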

Patch

diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 64e2106..0faa8a1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -532,7 +532,7 @@  bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
-			skb_checksum_none_assert(skb);
+			skb->ip_summed = CHECKSUM_NONE;
 
 		rcb->rxq->rx_packets++;
 		rcb->rxq->rx_bytes += skb->len;
@@ -555,7 +555,8 @@  next:
 	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
 
 	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
-		bna_ib_ack(ccb->i_dbell, packets);
+		bna_ib_ack_disable_irq(ccb->i_dbell, packets);
+
 	bnad_refill_rxq(bnad, ccb->rcb[0]);
 	if (ccb->rcb[1])
 		bnad_refill_rxq(bnad, ccb->rcb[1]);
@@ -593,10 +594,9 @@  bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
 	struct napi_struct *napi = &rx_ctrl->napi;
 
 	if (likely(napi_schedule_prep(napi))) {
-		bnad_disable_rx_irq(bnad, ccb);
 		__napi_schedule(napi);
+		rx_ctrl->rx_schedule++;
 	}
-	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
 }
 
 /* MSIX Rx Path Handler */
@@ -605,8 +605,10 @@  bnad_msix_rx(int irq, void *data)
 {
 	struct bna_ccb *ccb = (struct bna_ccb *)data;
 
-	if (ccb)
+	if (ccb) {
+		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
 		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -1675,22 +1677,23 @@  bnad_napi_poll_rx(struct napi_struct *napi, int budget)
 	struct bnad *bnad = rx_ctrl->bnad;
 	int rcvd = 0;
 
+	rx_ctrl->rx_poll_ctr++;
 
 	if (!netif_carrier_ok(bnad->netdev))
 		goto poll_exit;
 
 	rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
-	if (rcvd == budget)
+	if (rcvd >= budget)
 		return rcvd;
 
 poll_exit:
 	napi_complete((napi));
 
-	BNAD_UPDATE_CTR(bnad, netif_rx_complete);
-
+	rx_ctrl->rx_complete++;
 
 	if (rx_ctrl->ccb)
-		bnad_enable_rx_irq(bnad, rx_ctrl->ccb);
+		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
+
 	return rcvd;
 }
 
@@ -1894,20 +1897,14 @@  bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
 	unsigned long flags;
-	int dim_timer_del = 0;
 
 	if (!rx_info->rx)
 		return;
 
-	if (0 == rx_id) {
-		spin_lock_irqsave(&bnad->bna_lock, flags);
-		dim_timer_del = bnad_dim_timer_running(bnad);
-		if (dim_timer_del)
-			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
-		spin_unlock_irqrestore(&bnad->bna_lock, flags);
-		if (dim_timer_del)
-			del_timer_sync(&bnad->dim_timer);
-	}
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	if (0 == rx_id)
+		bnad_dim_timer_stop(bnad, flags);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	init_completion(&bnad->bnad_completions.rx_comp);
 	spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -2408,12 +2405,11 @@  bnad_enable_msix(struct bnad *bnad)
 
 		spin_lock_irqsave(&bnad->bna_lock, flags);
 		/* ret = #of vectors that we got */
-		bnad_q_num_adjust(bnad, ret, 0);
+		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
+			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-		bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
-			+ (bnad->num_rx
-			* bnad->num_rxp_per_rx) +
+		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
 			 BNAD_MAILBOX_MSIX_VECTORS;
 
 		if (bnad->msix_num > ret)
@@ -2571,26 +2567,21 @@  bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	u32		unmap_prod, wis, wis_used, wi_range;
 	u32		vectors, vect_id, i, acked;
 	int			err;
+	unsigned int		len;
 
 	dma_addr_t		dma_addr;
 	struct bna_txq_entry *txqent;
 	u16	flags;
 
-	if (unlikely
-	    (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
+	BNAD_DROP_AND_RETURN_IF(skb->len <= ETH_HLEN, tx_skb_too_short);
+	BNAD_DROP_AND_RETURN_IF(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR,
+				tx_skb_headlen_too_long);
+	BNAD_DROP_AND_RETURN_IF(skb_headlen(skb) == 0, tx_skb_headlen_zero);
 
 	txq_id = skb_get_queue_mapping(skb);
 
 	tcb = bnad->tx_info[0].tcb[txq_id];
 
-	if (unlikely(!tcb)) {
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
 	unmap_q = tcb->unmap_q;
 	/*
 	 * Takes care of the Tx that is scheduled between clearing the flag
@@ -2638,8 +2629,6 @@  bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	unmap_prod = unmap_q->producer_index;
-	wis_used = 1;
-	vect_id = 0;
 	flags = 0;
 
 	txq_prod = tcb->producer_index;
@@ -2647,9 +2636,6 @@  bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	BUG_ON(!(wi_range <= tcb->q_depth));
 	txqent->hdr.wi.reserved = 0;
 	txqent->hdr.wi.num_vectors = vectors;
-	txqent->hdr.wi.opcode =
-		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
-		       BNA_TXQ_WI_SEND));
 
 	if (vlan_tx_tag_present(skb)) {
 		vlan_tag = (u16) vlan_tx_tag_get(skb);
@@ -2665,62 +2651,74 @@  bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
 
 	if (skb_is_gso(skb)) {
-		err = bnad_tso_prepare(bnad, skb);
-		if (err) {
-			dev_kfree_skb(skb);
-			return NETDEV_TX_OK;
+		BNAD_DROP_AND_RETURN_IF(skb_is_gso(skb) > netdev->mtu,
+					tx_skb_mss_too_long);
+		if (unlikely((skb_is_gso(skb) + skb_transport_offset(skb) +
+			      tcp_hdrlen(skb)) >= skb->len)) {
+			txqent->hdr.wi.opcode =
+				__constant_htons(BNA_TXQ_WI_SEND);
+			txqent->hdr.wi.lso_mss = 0;
+			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
+		} else {
+			txqent->hdr.wi.opcode =
+				__constant_htons(BNA_TXQ_WI_SEND_LSO);
+			txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
 		}
-		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
+
+		err = bnad_tso_prepare(bnad, skb);
+		BNAD_DROP_AND_RETURN_IF(err, tx_skb_tso_prepare);
 		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
 		txqent->hdr.wi.l4_hdr_size_n_offset =
 			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
 			      (tcp_hdrlen(skb) >> 2,
 			       skb_transport_offset(skb)));
-	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		u8 proto = 0;
-
+	} else {
+		txqent->hdr.wi.opcode =	__constant_htons(BNA_TXQ_WI_SEND);
 		txqent->hdr.wi.lso_mss = 0;
 
-		if (skb->protocol == htons(ETH_P_IP))
-			proto = ip_hdr(skb)->protocol;
-		else if (skb->protocol == htons(ETH_P_IPV6)) {
-			/* nexthdr may not be TCP immediately. */
-			proto = ipv6_hdr(skb)->nexthdr;
-		}
-		if (proto == IPPROTO_TCP) {
-			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
-			txqent->hdr.wi.l4_hdr_size_n_offset =
-				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
-				      (0, skb_transport_offset(skb)));
-
-			BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
-
-			BUG_ON(!(skb_headlen(skb) >=
-				skb_transport_offset(skb) + tcp_hdrlen(skb)));
+		BNAD_DROP_AND_RETURN_IF(skb->len > (netdev->mtu + ETH_HLEN),
+					tx_skb_non_tso_too_long);
 
-		} else if (proto == IPPROTO_UDP) {
-			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
-			txqent->hdr.wi.l4_hdr_size_n_offset =
-				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
-				      (0, skb_transport_offset(skb)));
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			u8 proto = 0;
 
-			BNAD_UPDATE_CTR(bnad, udpcsum_offload);
-
-			BUG_ON(!(skb_headlen(skb) >=
-				   skb_transport_offset(skb) +
-				   sizeof(struct udphdr)));
-		} else {
-			err = skb_checksum_help(skb);
-			BNAD_UPDATE_CTR(bnad, csum_help);
-			if (err) {
-				dev_kfree_skb(skb);
-				BNAD_UPDATE_CTR(bnad, csum_help_err);
-				return NETDEV_TX_OK;
+			if (skb->protocol == __constant_htons(ETH_P_IP))
+				proto = ip_hdr(skb)->protocol;
+			else if (skb->protocol ==
+				 __constant_htons(ETH_P_IPV6)) {
+				/* nexthdr may not be TCP immediately. */
+				proto = ipv6_hdr(skb)->nexthdr;
+			}
+			if (proto == IPPROTO_TCP) {
+				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
+				txqent->hdr.wi.l4_hdr_size_n_offset =
+					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+					      (0, skb_transport_offset(skb)));
+
+				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
+
+				BNAD_DROP_AND_RETURN_IF(skb_headlen(skb) <
+					    skb_transport_offset(skb) +
+					    tcp_hdrlen(skb), tx_skb_tcp_hdr);
+
+			} else if (proto == IPPROTO_UDP) {
+				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
+				txqent->hdr.wi.l4_hdr_size_n_offset =
+					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
+					      (0, skb_transport_offset(skb)));
+
+				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
+				BNAD_DROP_AND_RETURN_IF(skb_headlen(skb) <
+					    skb_transport_offset(skb) +
+					    sizeof(struct udphdr),
+							tx_skb_udp_hdr);
+
+			} else {
+				BNAD_DROP_AND_RETURN(tx_skb_csum_err);
 			}
+		} else {
+			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
 		}
-	} else {
-		txqent->hdr.wi.lso_mss = 0;
-		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
 	}
 
 	txqent->hdr.wi.flags = htons(flags);
@@ -2728,20 +2726,36 @@  bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	txqent->hdr.wi.frame_length = htonl(skb->len);
 
 	unmap_q->unmap_array[unmap_prod].skb = skb;
-	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
-	txqent->vector[vect_id].length = htons(skb_headlen(skb));
+	len = skb_headlen(skb);
+	txqent->vector[0].length = htons(len);
 	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
 				  skb_headlen(skb), DMA_TO_DEVICE);
 	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 			   dma_addr);
 
-	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
+	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
 	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
 
+	vect_id = 0;
+	wis_used = 1;
+
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
 		u16		size = frag->size;
 
+		if (unlikely(size == 0)) {
+			unmap_prod = unmap_q->producer_index;
+			prefetch(&unmap_q->unmap_array[unmap_prod + 1]);
+
+			BNAD_PCI_UNMAP_SKB(&bnad->pcidev->dev,
+					   unmap_q->unmap_array,
+					   unmap_prod, unmap_q->q_depth, skb,
+					   i);
+			BNAD_DROP_AND_RETURN(tx_skb_frag_zero);
+		}
+
+		len += size;
+
 		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
 			vect_id = 0;
 			if (--wi_range)
@@ -2752,10 +2766,10 @@  bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 				wis_used = 0;
 				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
 						     txqent, wi_range);
-				BUG_ON(!(wi_range <= tcb->q_depth));
 			}
 			wis_used++;
-			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
+			txqent->hdr.wi_ext.opcode =
+				__constant_htons(BNA_TXQ_WI_EXTENSION);
 		}
 
 		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
@@ -2768,6 +2782,16 @@  bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
 	}
 
+	if (unlikely(len != skb->len)) {
+		unmap_prod = unmap_q->producer_index;
+		prefetch(&unmap_q->unmap_array[unmap_prod + 1]);
+
+		BNAD_PCI_UNMAP_SKB(&bnad->pcidev->dev, unmap_q->unmap_array,
+				   unmap_prod, unmap_q->q_depth, skb,
+				   skb_shinfo(skb)->nr_frags);
+		BNAD_DROP_AND_RETURN(tx_skb_len_mismatch);
+	}
+
 	unmap_q->producer_index = unmap_prod;
 	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
 	tcb->producer_index = txq_prod;
@@ -2778,6 +2802,7 @@  bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 
 	bna_txq_prod_indx_doorbell(tcb);
+	smp_mb();
 
 	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
 		tasklet_schedule(&bnad->tx_free_tasklet);
@@ -2888,6 +2913,9 @@  bnad_set_rx_mode(struct net_device *netdev)
 		}
 	}
 
+	if (bnad->rx_info[0].rx == NULL)
+		goto unlock;
+
 	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
 
 	if (!netdev_mc_empty(netdev)) {
@@ -3040,12 +3068,9 @@  bnad_netpoll(struct net_device *netdev)
 				continue;
 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
 				rx_ctrl = &rx_info->rx_ctrl[j];
-				if (rx_ctrl->ccb) {
-					bnad_disable_rx_irq(bnad,
-							    rx_ctrl->ccb);
+				if (rx_ctrl->ccb)
 					bnad_netif_rx_schedule_poll(bnad,
 							    rx_ctrl->ccb);
-				}
 			}
 		}
 	}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 6074c6d..f402149 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -55,6 +55,11 @@  struct bnad_rx_ctrl {
 	struct bnad *bnad;
 	unsigned long  flags;
 	struct napi_struct	napi;
+	u64		rx_intr_ctr;
+	u64		rx_poll_ctr;
+	u64		rx_schedule;
+	u64		rx_keep_poll;
+	u64		rx_complete;
 };
 
 #define BNAD_RXMODE_PROMISC_DEFAULT	BNA_RXMODE_PROMISC
@@ -150,8 +155,20 @@  struct bnad_drv_stats {
 	u64		udpcsum_offload;
 	u64		csum_help;
 	u64		csum_help_err;
+	u64		tx_skb_too_short;
 	u64		tx_skb_stopping;
 	u64		tx_skb_max_vectors;
+	u64		tx_skb_mss_too_long;
+	u64		tx_skb_tso_too_short;
+	u64		tx_skb_tso_prepare;
+	u64		tx_skb_non_tso_too_long;
+	u64		tx_skb_tcp_hdr;
+	u64		tx_skb_udp_hdr;
+	u64		tx_skb_csum_err;
+	u64		tx_skb_headlen_too_long;
+	u64		tx_skb_headlen_zero;
+	u64		tx_skb_frag_zero;
+	u64		tx_skb_len_mismatch;
 
 	u64		hw_stats_updates;
 	u64		netif_rx_schedule;
@@ -359,7 +376,7 @@  extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
 
 #define bnad_enable_rx_irq_unsafe(_ccb)			\
 {							\
-	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\
+	if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\
 		bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
 			(_ccb)->rx_coalescing_timeo);		\
 		bna_ib_ack((_ccb)->i_dbell, 0);			\
@@ -369,5 +386,19 @@  extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
 #define bnad_dim_timer_running(_bnad)				\
 	(((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) &&		\
 	(test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
+#define bnad_dim_timer_stop(_bnad, _flags)		\
+do {							\
+	int to_del = 0;					\
+							\
+	if ((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED &&	\
+	    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &(_bnad)->run_flags)) {\
+		clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &(_bnad)->run_flags);\
+		to_del = 1;				\
+	}						\
+	spin_unlock_irqrestore(&(_bnad)->bna_lock, (_flags));	\
+	if (to_del)					\
+		del_timer_sync(&(_bnad)->dim_timer);	\
+	spin_lock_irqsave(&(_bnad)->bna_lock, (_flags));\
+} while (0)
 
 #endif /* __BNAD_H__ */
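
A closing note on the bnad_dim_timer_stop() macro at the end of the header diff: it drops bna_lock around del_timer_sync(), presumably because del_timer_sync() waits for a running timer callback to finish and the DIM timeout handler takes bna_lock itself; calling it with the lock held could therefore deadlock. A minimal usage sketch under that assumption (the caller name is illustrative):

/* Callers enter with bna_lock held; the macro releases and re-acquires
 * the lock internally so del_timer_sync() never runs under it. */
static void bnad_rx_teardown_sketch(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_dim_timer_stop(bnad, flags);	/* may drop/re-take bna_lock */
	/* bna_lock is held again at this point */
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}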